2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 /* Communications core for Avago Technologies (LSI) MPT2 */
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
63 #include <machine/stdarg.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
89 #define MPSSAS_DISCOVERY_TIMEOUT 20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
93 * static array to check SCSI OpCode for EEDP protection bits
95 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP protection-flag lookup table, indexed by SCSI CDB opcode
 * (0x00-0xFF).  Non-zero entries mark the READ (PRO_R), WRITE (PRO_W)
 * and VERIFY (PRO_V) opcode variants that carry protection information;
 * all other opcodes are 0 (no EEDP handling).
 */
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124 struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128 struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
133 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static void mpssas_async(void *callback_arg, uint32_t code,
138 struct cam_path *path, void *arg);
139 static int mpssas_send_portenable(struct mps_softc *sc);
140 static void mpssas_portenable_complete(struct mps_softc *sc,
141 struct mps_command *cm);
/*
 * Linear scan of sassc->targets[] from index 'start' for the entry whose
 * firmware device handle matches 'handle'.  Returns the matching target.
 * NOTE(review): lines are missing from this extract (loop index
 * declaration and the return paths) — confirm against the full source.
 */
143 struct mpssas_target *
144 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
146 struct mpssas_target *target;
149 for (i = start; i < sassc->maxtargets; i++) {
150 target = &sassc->targets[i];
151 if (target->handle == handle)
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery. Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
/*
 * Take a reference on discovery-time activity.  While MPSSAS_IN_STARTUP
 * is set, the first reference freezes the SIM queue so CAM won't issue
 * commands before device handles are known (see block comment above).
 */
166 mpssas_startup_increment(struct mpssas_softc *sassc)
168 MPS_FUNCTRACE(sassc->sc);
170 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
171 if (sassc->startup_refcount++ == 0) {
172 /* just starting, freeze the simq */
173 mps_dprint(sassc->sc, MPS_INIT,
174 "%s freezing simq\n", __func__);
176 xpt_freeze_simq(sassc->sim, 1);
178 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
179 sassc->startup_refcount);
/*
 * Release the SIM queue if it was frozen due to a full request queue
 * (MPSSAS_QUEUE_FROZEN), clearing the flag so it is released only once.
 */
184 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
186 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
187 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
188 xpt_release_simq(sassc->sim, 1);
189 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When
 * the last reference goes away, startup is over: clear
 * MPSSAS_IN_STARTUP and release the SIM queue so CAM can resume I/O.
 */
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
196 MPS_FUNCTRACE(sassc->sc);
198 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 if (--sassc->startup_refcount == 0) {
200 /* finished all discovery-related actions, release
201 * the simq and rescan for the latest topology.
203 mps_dprint(sassc->sc, MPS_INIT,
204 "%s releasing simq\n", __func__);
205 sassc->flags &= ~MPSSAS_IN_STARTUP;
206 xpt_release_simq(sassc->sim, 1);
209 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
210 sassc->startup_refcount);
215 * The firmware requires us to stop sending commands when we're doing task
217 * XXX The logic for serializing the device has been made lazy and moved to
218 * mpssas_prepare_for_tm().
/*
 * Allocate a high-priority command for use as a task-management (TM)
 * request and pre-set its Function to SCSI_TASK_MGMT.  The caller fills
 * in the task type, handle, and completion routine.
 */
221 mpssas_alloc_tm(struct mps_softc *sc)
223 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
224 struct mps_command *tm;
226 tm = mps_alloc_high_priority_command(sc);
230 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a TM command: clear the target's INRESET flag, unfreeze the
 * per-device queue that was frozen for the TM, free the path/CCB used
 * for the freeze, and return the command to the high-priority pool.
 */
236 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
238 int target_id = 0xFFFFFFFF;
244 * For TM's the devq is frozen for the device. Unfreeze it here and
245 * free the resources used for freezing the devq. Must clear the
246 * INRESET flag as well or scsi I/O will not work.
248 if (tm->cm_targ != NULL) {
249 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
250 target_id = tm->cm_targ->tid;
253 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
255 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
256 xpt_free_path(tm->cm_ccb->ccb_h.path);
257 xpt_free_ccb(tm->cm_ccb);
260 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan.  A NULL/wildcard target rescans the whole bus
 * (XPT_SCAN_BUS); otherwise the target id is derived from the target's
 * position in the targets[] array and a single-target scan is issued.
 */
264 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
266 struct mpssas_softc *sassc = sc->sassc;
268 target_id_t targetid;
272 pathid = cam_sim_path(sassc->sim);
274 targetid = CAM_TARGET_WILDCARD;
276 targetid = targ - sassc->targets;
279 * Allocate a CCB and schedule a rescan.
281 ccb = xpt_alloc_ccb_nowait();
283 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
287 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
288 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
294 if (targetid == CAM_TARGET_WILDCARD)
295 ccb->ccb_h.func_code = XPT_SCAN_BUS;
297 ccb->ccb_h.func_code = XPT_SCAN_TGT;
299 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logging for a command, gated on the sc's debug
 * level mask.  Prefixes the message with the CCB's CAM path (or a
 * "noperiph" sim/bus/target tuple when there is no CCB), the SCSI CDB
 * for XPT_SCSI_IO requests, and the command's SMID.
 */
304 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
314 /* No need to be in here if debugging isn't enabled */
315 if ((cm->cm_sc->mps_debug & level) == 0)
318 sbuf_new(&sb, str, sizeof(str), 0);
322 if (cm->cm_ccb != NULL) {
323 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
325 sbuf_cat(&sb, path_str);
326 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
327 scsi_command_string(&cm->cm_ccb->csio, &sb);
328 sbuf_printf(&sb, "length %d ",
329 cm->cm_ccb->csio.dxfer_len);
333 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
334 cam_sim_name(cm->cm_sc->sassc->sim),
335 cam_sim_unit(cm->cm_sc->sassc->sim),
336 cam_sim_bus(cm->cm_sc->sassc->sim),
337 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
341 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
342 sbuf_vprintf(&sb, fmt, ap);
344 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  On success, clears the target's
 * enclosure/expander/linkrate state; the handle is recovered from
 * cm_complete_data.  A NULL reply (diag reset in progress) just frees
 * the TM.  No SAS_OP_REMOVE_DEVICE follows for volumes (see comment
 * above mpssas_prepare_volume_remove).
 */
351 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
353 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
354 struct mpssas_target *targ;
359 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
360 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
364 /* XXX retry the remove after the diag reset completes? */
365 mps_dprint(sc, MPS_FAULT,
366 "%s NULL reply resetting device 0x%04x\n", __func__,
368 mpssas_free_tm(sc, tm);
372 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373 MPI2_IOCSTATUS_SUCCESS) {
374 mps_dprint(sc, MPS_ERROR,
375 "IOCStatus = 0x%x while resetting device 0x%x\n",
376 le16toh(reply->IOCStatus), handle);
379 mps_dprint(sc, MPS_XINFO,
380 "Reset aborted %u commands\n", reply->TerminationCount);
381 mps_free_reply(sc, tm->cm_reply_data);
382 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
384 mps_dprint(sc, MPS_XINFO,
385 "clearing target %u handle 0x%04x\n", targ->tid, handle);
388 * Don't clear target if remove fails because things will get confusing.
389 * Leave the devname and sasaddr intact so that we know to avoid reusing
390 * this target id if possible, and so we can assign the same target id
391 * to this device if it comes back in the future.
393 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
394 MPI2_IOCSTATUS_SUCCESS) {
397 targ->encl_handle = 0x0;
398 targ->encl_slot = 0x0;
399 targ->exp_dev_handle = 0x0;
401 targ->linkrate = 0x0;
406 mpssas_free_tm(sc, tm);
411 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
412 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by its firmware handle:
 * mark the target INREMOVAL, rescan so CAM notices the departure, and
 * send a target-reset TM whose completion (mpssas_remove_volume) does
 * the cleanup.  WD controllers configured to always expose the disk
 * skip removal entirely.
 * NOTE(review): unlike mpssas_prepare_remove(), no memset of the
 * request or htole16() on DevHandle is visible here — confirm against
 * the full source whether that is intentional.
 */
415 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
417 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418 struct mps_softc *sc;
419 struct mps_command *tm;
420 struct mpssas_target *targ = NULL;
422 MPS_FUNCTRACE(sassc->sc);
427 * If this is a WD controller, determine if the disk should be exposed
428 * to the OS or not. If disk should be exposed, return from this
429 * function without doing anything.
431 if (sc->WD_available && (sc->WD_hide_expose ==
432 MPS_WD_EXPOSE_ALWAYS)) {
437 targ = mpssas_find_target_by_handle(sassc, 0, handle);
439 /* FIXME: what is the action? */
440 /* We don't know about this device? */
441 mps_dprint(sc, MPS_ERROR,
442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 targ->flags |= MPSSAS_TARGET_INREMOVAL;
448 tm = mpssas_alloc_tm(sc);
450 mps_dprint(sc, MPS_ERROR,
451 "%s: command alloc failure\n", __func__);
455 mpssas_rescan_target(sc, targ);
457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
458 req->DevHandle = targ->handle;
459 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
461 /* SAS Hard Link Reset / SATA Link Reset */
462 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
466 tm->cm_complete = mpssas_remove_volume;
467 tm->cm_complete_data = (void *)(uintptr_t)handle;
469 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
470 __func__, targ->tid);
471 mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
473 mps_map_command(sc, tm);
477 * The MPT2 firmware performs debounce on the link to avoid transient link
478 * errors and false removals. When it does decide that link has been lost
479 * and a device need to go away, it expects that the host will perform a
480 * target reset and then an op remove. The reset has the side-effect of
481 * aborting any outstanding requests for the device, which is required for
482 * the op-remove to succeed. It's not clear if the host should check for
483 * the device coming back alive after the reset.
/*
 * Begin removal of a bare drive identified by its firmware handle:
 * mark the target INREMOVAL, rescan so CAM notices the departure, and
 * send a target-reset TM.  Per the firmware contract described above,
 * the reset aborts outstanding I/O; the completion handler
 * (mpssas_remove_device) then issues the SAS_OP_REMOVE_DEVICE.
 */
486 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
488 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489 struct mps_softc *sc;
490 struct mps_command *cm;
491 struct mpssas_target *targ = NULL;
493 MPS_FUNCTRACE(sassc->sc);
497 targ = mpssas_find_target_by_handle(sassc, 0, handle);
499 /* FIXME: what is the action? */
500 /* We don't know about this device? */
501 mps_dprint(sc, MPS_ERROR,
502 "%s : invalid handle 0x%x \n", __func__, handle);
506 targ->flags |= MPSSAS_TARGET_INREMOVAL;
508 cm = mpssas_alloc_tm(sc);
510 mps_dprint(sc, MPS_ERROR,
511 "%s: command alloc failure\n", __func__);
515 mpssas_rescan_target(sc, targ);
517 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
518 memset(req, 0, sizeof(*req));
519 req->DevHandle = htole16(targ->handle);
520 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
522 /* SAS Hard Link Reset / SATA Link Reset */
523 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
527 cm->cm_complete = mpssas_remove_device;
528 cm->cm_complete_data = (void *)(uintptr_t)handle;
530 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
531 __func__, targ->tid);
532 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
534 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  After logging the reset result and freeing
 * the reply frame, the same command is re-purposed as a SAS IO Unit
 * Control REMOVE_DEVICE request, sent immediately if the target has no
 * pending commands, otherwise stashed in targ->pending_remove_tm to be
 * sent once they drain.
 */
538 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
540 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
541 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
542 struct mpssas_target *targ;
547 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
548 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
552 * Currently there should be no way we can hit this case. It only
553 * happens when we have a failure to allocate chain frames, and
554 * task management commands don't have S/G lists.
556 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
557 mps_dprint(sc, MPS_ERROR,
558 "%s: cm_flags = %#x for remove of handle %#04x! "
559 "This should not happen!\n", __func__, tm->cm_flags,
564 /* XXX retry the remove after the diag reset completes? */
565 mps_dprint(sc, MPS_FAULT,
566 "%s NULL reply resetting device 0x%04x\n", __func__,
568 mpssas_free_tm(sc, tm);
572 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
573 MPI2_IOCSTATUS_SUCCESS) {
574 mps_dprint(sc, MPS_ERROR,
575 "IOCStatus = 0x%x while resetting device 0x%x\n",
576 le16toh(reply->IOCStatus), handle);
579 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
580 le32toh(reply->TerminationCount));
581 mps_free_reply(sc, tm->cm_reply_data);
582 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
584 /* Reuse the existing command */
585 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
586 memset(req, 0, sizeof(*req));
587 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
588 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
589 req->DevHandle = htole16(handle);
591 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
592 tm->cm_complete = mpssas_remove_complete;
593 tm->cm_complete_data = (void *)(uintptr_t)handle;
596 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
597 * They should be aborted or time out and we'll kick this off there
600 if (TAILQ_FIRST(&targ->commands) == NULL) {
601 mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
602 mps_map_command(sc, tm);
603 targ->pending_remove_tm = NULL;
605 targ->pending_remove_tm = tm;
609 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request.  By now the
 * target must have no pending commands (KASSERT).  On success, clears
 * the target's enclosure/expander/linkrate state (devname and sasaddr
 * are deliberately preserved — see comment below) and frees the LUN
 * list, then frees the TM command.
 */
614 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
616 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
618 struct mpssas_target *targ;
619 struct mpssas_lun *lun;
623 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
624 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * At this point, we should have no pending commands for the target.
629 * The remove target has just completed.
631 KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
632 ("%s: no commands should be pending\n", __func__));
636 * Currently there should be no way we can hit this case. It only
637 * happens when we have a failure to allocate chain frames, and
638 * task management commands don't have S/G lists.
640 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
641 mps_dprint(sc, MPS_XINFO,
642 "%s: cm_flags = %#x for remove of handle %#04x! "
643 "This should not happen!\n", __func__, tm->cm_flags,
645 mpssas_free_tm(sc, tm);
650 /* most likely a chip reset */
651 mps_dprint(sc, MPS_FAULT,
652 "%s NULL reply removing device 0x%04x\n", __func__, handle);
653 mpssas_free_tm(sc, tm);
657 mps_dprint(sc, MPS_XINFO,
658 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
659 handle, le16toh(reply->IOCStatus));
662 * Don't clear target if remove fails because things will get confusing.
663 * Leave the devname and sasaddr intact so that we know to avoid reusing
664 * this target id if possible, and so we can assign the same target id
665 * to this device if it comes back in the future.
667 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
668 MPI2_IOCSTATUS_SUCCESS) {
670 targ->encl_handle = 0x0;
671 targ->encl_slot = 0x0;
672 targ->exp_dev_handle = 0x0;
674 targ->linkrate = 0x0;
678 while(!SLIST_EMPTY(&targ->luns)) {
679 lun = SLIST_FIRST(&targ->luns);
680 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
686 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask (SAS device/topology/discovery plus IR
 * volume/physical-disk events and log entries) and register
 * mpssas_evt_handler for them, saving the handle in sassc->mpssas_eh.
 */
690 mpssas_register_events(struct mps_softc *sc)
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
/*
 * Attach the CAM/SAS layer: allocate the sassc and targets[] array,
 * create the SIMQ/SIM, start the firmware-event taskqueue, register a
 * single CAM bus, freeze the SIMQ until discovery completes, and
 * register for AC_ADVINFO_CHANGED async events (used to probe EEDP
 * capabilities — failure there is non-fatal).
 * NOTE(review): malloc() with M_WAITOK cannot return NULL, so the
 * NULL checks after these allocations look vestigial — confirm.
 */
715 mps_attach_sas(struct mps_softc *sc)
717 struct mpssas_softc *sassc;
719 int unit, error = 0, reqs;
722 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
724 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
726 mps_dprint(sc, MPS_INIT|MPS_ERROR,
727 "Cannot allocate SAS controller memory\n");
732 * XXX MaxTargets could change during a reinit. Since we don't
733 * resize the targets[] array during such an event, cache the value
734 * of MaxTargets here so that we don't get into trouble later. This
735 * should move into the reinit logic.
737 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
738 sassc->targets = malloc(sizeof(struct mpssas_target) *
739 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
740 if(!sassc->targets) {
741 mps_dprint(sc, MPS_INIT|MPS_ERROR,
742 "Cannot allocate SAS target memory\n");
749 reqs = sc->num_reqs - sc->num_prireqs - 1;
750 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
751 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
756 unit = device_get_unit(sc->mps_dev);
757 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
758 unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
759 if (sassc->sim == NULL) {
760 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
765 TAILQ_INIT(&sassc->ev_queue);
767 /* Initialize taskqueue for Event Handling */
768 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
769 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
770 taskqueue_thread_enqueue, &sassc->ev_tq);
771 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
772 device_get_nameunit(sc->mps_dev));
777 * XXX There should be a bus for every port on the adapter, but since
778 * we're just going to fake the topology for now, we'll pretend that
779 * everything is just a target on a single bus.
781 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
782 mps_dprint(sc, MPS_INIT|MPS_ERROR,
783 "Error %d registering SCSI bus\n", error);
789 * Assume that discovery events will start right away.
791 * Hold off boot until discovery is complete.
793 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
794 sc->sassc->startup_refcount = 0;
795 mpssas_startup_increment(sassc);
797 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
800 * Register for async events so we can determine the EEDP
801 * capabilities of devices.
803 status = xpt_create_path(&sassc->path, /*periph*/NULL,
804 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
806 if (status != CAM_REQ_CMP) {
807 mps_dprint(sc, MPS_ERROR|MPS_INIT,
808 "Error %#x creating sim path\n", status);
813 event = AC_ADVINFO_CHANGED;
814 status = xpt_register_async(event, mpssas_async, sc,
816 if (status != CAM_REQ_CMP) {
817 mps_dprint(sc, MPS_ERROR,
818 "Error %#x registering async handler for "
819 "AC_ADVINFO_CHANGED events\n", status);
820 xpt_free_path(sassc->path);
824 if (status != CAM_REQ_CMP) {
826 * EEDP use is the exception, not the rule.
827 * Warn the user, but do not fail to attach.
829 mps_printf(sc, "EEDP capabilities disabled.\n");
834 mpssas_register_events(sc);
839 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
/*
 * Tear down the CAM/SAS layer in reverse order of attach: deregister
 * events, drain/free the event taskqueue (with the lock dropped to
 * avoid deadlock), unwind any startup refcounts so CAM isn't left
 * frozen, remove the async handler and path, deregister the bus, free
 * the SIM and SIMQ, and release per-target LUN lists and targets[].
 */
844 mps_detach_sas(struct mps_softc *sc)
846 struct mpssas_softc *sassc;
847 struct mpssas_lun *lun, *lun_tmp;
848 struct mpssas_target *targ;
853 if (sc->sassc == NULL)
857 mps_deregister_events(sc, sassc->mpssas_eh);
860 * Drain and free the event handling taskqueue with the lock
861 * unheld so that any parallel processing tasks drain properly
862 * without deadlocking.
864 if (sassc->ev_tq != NULL)
865 taskqueue_free(sassc->ev_tq);
867 /* Make sure CAM doesn't wedge if we had to bail out early. */
870 while (sassc->startup_refcount != 0)
871 mpssas_startup_decrement(sassc);
873 /* Deregister our async handler */
874 if (sassc->path != NULL) {
875 xpt_register_async(0, mpssas_async, sc, sassc->path);
876 xpt_free_path(sassc->path);
880 if (sassc->flags & MPSSAS_IN_STARTUP)
881 xpt_release_simq(sassc->sim, 1);
883 if (sassc->sim != NULL) {
884 xpt_bus_deregister(cam_sim_path(sassc->sim));
885 cam_sim_free(sassc->sim, FALSE);
890 if (sassc->devq != NULL)
891 cam_simq_free(sassc->devq);
893 for(i=0; i< sassc->maxtargets ;i++) {
894 targ = &sassc->targets[i];
895 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
899 free(sassc->targets, M_MPT2);
/*
 * Called when firmware discovery finishes: cancel any pending discovery
 * timeout callout, then (once per init, gated by track_mapping_events)
 * arm the device-check callout that updates missing counts in the
 * mapping table after events have had time to arrive.
 */
907 mpssas_discovery_end(struct mpssas_softc *sassc)
909 struct mps_softc *sc = sassc->sc;
913 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
914 callout_stop(&sassc->discovery_callout);
917 * After discovery has completed, check the mapping table for any
918 * missing devices and update their missing counts. Only do this once
919 * whenever the driver is initialized so that missing counts aren't
920 * updated unnecessarily. Note that just because discovery has
921 * completed doesn't mean that events have been processed yet. The
922 * check_devices function is a callout timer that checks if ALL devices
923 * are missing. If so, it will wait a little longer for events to
924 * complete and keep resetting itself until some device in the mapping
925 * table is not missing, meaning that event processing has started.
927 if (sc->track_mapping_events) {
928 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
929 "completed. Check for missing devices in the mapping "
931 callout_reset(&sc->device_check_callout,
932 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: answers XPT_PATH_INQ and XPT_GET_TRAN_SETTINGS
 * locally, computes geometry, routes device resets and SCSI/SMP I/O to
 * their handlers, fakes success for abort/reset requests the hardware
 * handles via TM, and returns CAM_FUNC_NOTAVAIL otherwise.  Must be
 * called with the mps mutex held.
 */
938 mpssas_action(struct cam_sim *sim, union ccb *ccb)
940 struct mpssas_softc *sassc;
942 sassc = cam_sim_softc(sim);
944 MPS_FUNCTRACE(sassc->sc);
945 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
946 ccb->ccb_h.func_code);
947 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
949 switch (ccb->ccb_h.func_code) {
952 struct ccb_pathinq *cpi = &ccb->cpi;
953 struct mps_softc *sc = sassc->sc;
955 cpi->version_num = 1;
956 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
957 cpi->target_sprt = 0;
958 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
959 cpi->hba_eng_cnt = 0;
960 cpi->max_target = sassc->maxtargets - 1;
964 * initiator_id is set here to an ID outside the set of valid
965 * target IDs (including volumes).
967 cpi->initiator_id = sassc->maxtargets;
968 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
969 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
970 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
971 cpi->unit_number = cam_sim_unit(sim);
972 cpi->bus_id = cam_sim_bus(sim);
973 cpi->base_transfer_speed = 150000;
974 cpi->transport = XPORT_SAS;
975 cpi->transport_version = 0;
976 cpi->protocol = PROTO_SCSI;
977 cpi->protocol_version = SCSI_REV_SPC;
978 cpi->maxio = sc->maxio;
979 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
982 case XPT_GET_TRAN_SETTINGS:
984 struct ccb_trans_settings *cts;
985 struct ccb_trans_settings_sas *sas;
986 struct ccb_trans_settings_scsi *scsi;
987 struct mpssas_target *targ;
990 sas = &cts->xport_specific.sas;
991 scsi = &cts->proto_specific.scsi;
993 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
994 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
995 cts->ccb_h.target_id));
996 targ = &sassc->targets[cts->ccb_h.target_id];
997 if (targ->handle == 0x0) {
998 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1002 cts->protocol_version = SCSI_REV_SPC2;
1003 cts->transport = XPORT_SAS;
1004 cts->transport_version = 0;
1006 sas->valid = CTS_SAS_VALID_SPEED;
1007 switch (targ->linkrate) {
1009 sas->bitrate = 150000;
1012 sas->bitrate = 300000;
1015 sas->bitrate = 600000;
1021 cts->protocol = PROTO_SCSI;
1022 scsi->valid = CTS_SCSI_VALID_TQ;
1023 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1025 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1028 case XPT_CALC_GEOMETRY:
1029 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1030 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1033 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1034 mpssas_action_resetdev(sassc, ccb);
1039 mps_dprint(sassc->sc, MPS_XINFO,
1040 "mpssas_action faking success for abort or reset\n");
1041 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1044 mpssas_action_scsiio(sassc, ccb);
1047 mpssas_action_smpio(sassc, ccb);
1050 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset (e.g. AC_BUS_RESET, AC_SENT_BDR) by creating a
 * path for the given target/LUN (wildcards allowed), issuing the async
 * event, and freeing the path.
 */
1058 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1059 target_id_t target_id, lun_id_t lun_id)
1061 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1062 struct cam_path *path;
1064 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1065 ac_code, target_id, (uintmax_t)lun_id);
1067 if (xpt_create_path(&path, NULL,
1068 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1069 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1074 xpt_async(ac_code, path, NULL);
1075 xpt_free_path(path);
/*
 * Diag-reset helper: walk every non-free command, force its reply to
 * NULL, free any SATA-identify buffer left over from a timed-out probe,
 * and complete it — via its cm_complete callback, a wakeup for sleeping
 * waiters, or the COMPLETE flag for polled commands.  Logs any command
 * that could not be completed.  Must be called with the mps mutex held.
 */
1079 mpssas_complete_all_commands(struct mps_softc *sc)
1081 struct mps_command *cm;
1086 mtx_assert(&sc->mps_mtx, MA_OWNED);
1088 /* complete all commands with a NULL reply */
1089 for (i = 1; i < sc->num_reqs; i++) {
1090 cm = &sc->commands[i];
1091 if (cm->cm_state == MPS_CM_STATE_FREE)
1094 cm->cm_state = MPS_CM_STATE_BUSY;
1095 cm->cm_reply = NULL;
1098 if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
1100 free(cm->cm_data, M_MPT2);
1104 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1105 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1107 if (cm->cm_complete != NULL) {
1108 mpssas_log_command(cm, MPS_RECOVERY,
1109 "completing cm %p state %x ccb %p for diag reset\n",
1110 cm, cm->cm_state, cm->cm_ccb);
1112 cm->cm_complete(sc, cm);
1114 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1115 mpssas_log_command(cm, MPS_RECOVERY,
1116 "waking up cm %p state %x ccb %p for diag reset\n",
1117 cm, cm->cm_state, cm->cm_ccb);
1122 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1123 /* this should never happen, but if it does, log */
1124 mpssas_log_command(cm, MPS_RECOVERY,
1125 "cm %p state %x flags 0x%x ccb %p during diag "
1126 "reset\n", cm, cm->cm_state, cm->cm_flags,
1131 sc->io_cmds_active = 0;
/*
 * Diag-reset recovery: re-enter startup mode (freezing the SIMQ via
 * mpssas_startup_increment), announce a bus reset to CAM, complete all
 * outstanding commands, then invalidate every target's handles and
 * outstanding counts — firmware handles may change after the reset, so
 * discovery (triggered later by portenable) must repopulate them.
 */
1135 mpssas_handle_reinit(struct mps_softc *sc)
1139 /* Go back into startup mode and freeze the simq, so that CAM
1140 * doesn't send any commands until after we've rediscovered all
1141 * targets and found the proper device handles for them.
1143 * After the reset, portenable will trigger discovery, and after all
1144 * discovery-related activities have finished, the simq will be
1147 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1148 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1149 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1150 mpssas_startup_increment(sc->sassc);
1152 /* notify CAM of a bus reset */
1153 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1156 /* complete and cleanup after all outstanding commands */
1157 mpssas_complete_all_commands(sc);
1159 mps_dprint(sc, MPS_INIT,
1160 "%s startup %u after command completion\n", __func__,
1161 sc->sassc->startup_refcount);
1163 /* zero all the target handles, since they may change after the
1164 * reset, and we have to rediscover all the targets and use the new
1167 for (i = 0; i < sc->sassc->maxtargets; i++) {
1168 if (sc->sassc->targets[i].outstanding != 0)
1169 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1170 i, sc->sassc->targets[i].outstanding);
1171 sc->sassc->targets[i].handle = 0x0;
1172 sc->sassc->targets[i].exp_dev_handle = 0x0;
1173 sc->sassc->targets[i].outstanding = 0;
1174 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command itself times
 * out.  Asserts the TM is still in-queue, marks it busy so completion
 * paths don't double-handle it.  Runs with the mps mutex held.
 */
1179 mpssas_tm_timeout(void *data)
1181 struct mps_command *tm = data;
1182 struct mps_softc *sc = tm->cm_sc;
1184 mtx_assert(&sc->mps_mtx, MA_OWNED);
1186 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1187 "task mgmt %p timed out\n", tm);
1189 KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1190 ("command not inqueue\n"));
1192 tm->cm_state = MPS_CM_STATE_BUSY;
/*
 * Completion handler for a LOGICAL UNIT RESET task management request.
 * Stops the TM timeout callout, validates the reply, then either finishes
 * recovery for this LUN (no commands left on it) or escalates to a full
 * target reset.
 * NOTE(review): excerpt with elided lines -- e.g. the assignment of 'targ',
 * several returns/braces, and the controller-reinit path after a lost
 * reply are not visible here.
 */
1197 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1199 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1200 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1201 unsigned int cm_count = 0;
1202 struct mps_command *cm;
1203 struct mpssas_target *targ;
/* The TM completed one way or another; cancel mpssas_tm_timeout. */
1205 callout_stop(&tm->cm_callout);
1207 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1208 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1212 * Currently there should be no way we can hit this case. It only
1213 * happens when we have a failure to allocate chain frames, and
1214 * task management commands don't have S/G lists.
1215 * XXXSL So should it be an assertion?
1217 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1218 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1219 "%s: cm_flags = %#x for LUN reset! "
1220 "This should not happen!\n", __func__, tm->cm_flags);
1221 mpssas_free_tm(sc, tm);
/*
 * A NULL reply frame: either a diag reset flushed the TM (clean up and
 * quit) or the reply was genuinely lost (escalate to a controller reset).
 */
1225 if (reply == NULL) {
1226 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1228 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1229 /* this completion was due to a reset, just cleanup */
1230 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1231 "reset, ignoring NULL LUN reset reply\n");
1233 mpssas_free_tm(sc, tm);
1236 /* we should have gotten a reply. */
1237 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1238 "LUN reset attempt, resetting controller\n");
1244 mps_dprint(sc, MPS_RECOVERY,
1245 "logical unit reset status 0x%x code 0x%x count %u\n",
1246 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1247 le32toh(reply->TerminationCount));
1250 * See if there are any outstanding commands for this LUN.
1251 * This could be made more efficient by using a per-LU data
1252 * structure of some sort.
1254 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1255 if (cm->cm_lun == tm->cm_lun)
/* NOTE(review): the cm_count increment line is elided from this excerpt. */
1259 if (cm_count == 0) {
1260 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1261 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a BDR-equivalent reset was delivered to this target/LUN. */
1264 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1267 * We've finished recovery for this logical unit. check and
1268 * see if some other logical unit has a timedout command
1269 * that needs to be processed.
1271 cm = TAILQ_FIRST(&targ->timedout_commands);
1273 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1274 "More commands to abort for target %u\n",
/* Reuse this TM frame to abort the next timed-out command. */
1276 mpssas_send_abort(sc, tm, cm);
1279 mpssas_free_tm(sc, tm);
1283 * If we still have commands for this LUN, the reset
1284 * effectively failed, regardless of the status reported.
1285 * Escalate to a target reset.
1287 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1288 "logical unit reset complete for target %u, but still "
1289 "have %u command(s), sending target reset\n", targ->tid,
1291 mpssas_send_reset(sc, tm,
1292 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task management request.  Stops
 * the TM timeout callout, validates the reply, and either finishes
 * recovery (target has no outstanding commands) or escalates further.
 * NOTE(review): excerpt with elided lines -- the 'targ' assignment,
 * returns/braces, and the final escalation call are not visible here.
 */
1297 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1299 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1300 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1301 struct mpssas_target *targ;
/* Cancel the mpssas_tm_timeout callout armed when the TM was sent. */
1303 callout_stop(&tm->cm_callout);
1305 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1306 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1310 * Currently there should be no way we can hit this case. It only
1311 * happens when we have a failure to allocate chain frames, and
1312 * task management commands don't have S/G lists.
1314 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1315 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1316 "This should not happen!\n", __func__, tm->cm_flags);
1317 mpssas_free_tm(sc, tm);
1321 if (reply == NULL) {
1322 mps_dprint(sc, MPS_RECOVERY,
/* NOTE(review): "%pi" looks like a typo for "%p" (the 'i' prints as a
 * stray literal after the pointer); fix in the full source. */
1323 "NULL target reset reply for tm %pi TaskMID %u\n",
1324 tm, le16toh(req->TaskMID));
1325 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1326 /* this completion was due to a reset, just cleanup */
1327 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1328 "reset, ignoring NULL target reset reply\n");
1330 mpssas_free_tm(sc, tm);
1332 /* we should have gotten a reply. */
1333 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1334 "target reset attempt, resetting controller\n");
1340 mps_dprint(sc, MPS_RECOVERY,
1341 "target reset status 0x%x code 0x%x count %u\n",
1342 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1343 le32toh(reply->TerminationCount));
1345 if (targ->outstanding == 0) {
1346 /* we've finished recovery for this target and all
1347 * of its logical units.
1349 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1350 "Finished reset recovery for target %u\n", targ->tid);
/* Report the BDR to CAM so peripherals can recover their state. */
1352 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1356 mpssas_free_tm(sc, tm);
1359 * After a target reset, if this target still has
1360 * outstanding commands, the reset effectively failed,
1361 * regardless of the status reported. escalate.
1363 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1364 "Target reset complete for target %u, but still have %u "
1365 "command(s), resetting controller\n", targ->tid,
/* Seconds to wait for a reset TM before mpssas_tm_timeout fires. */
1371 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a reset task management request (LOGICAL UNIT RESET or
 * TARGET RESET) on the pre-allocated TM command 'tm'.  Sets the matching
 * completion handler, arms the MPS_RESET_TIMEOUT callout, and maps the
 * command to the hardware.
 * NOTE(review): excerpt -- the error-return paths, the declaration of
 * 'err', and the closing brace are elided from this listing.
 */
1374 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1376 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1377 struct mpssas_target *target;
1380 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1381 if (target->handle == 0) {
1382 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1383 __func__, target->tid);
1387 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1388 req->DevHandle = htole16(target->handle);
1389 req->TaskType = type;
1391 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1392 /* XXX Need to handle invalid LUNs */
1393 MPS_SET_LUN(req->LUN, tm->cm_lun);
1394 tm->cm_targ->logical_unit_resets++;
1395 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1396 "Sending logical unit reset to target %u lun %d\n",
1397 target->tid, tm->cm_lun);
1398 tm->cm_complete = mpssas_logical_unit_reset_complete;
1399 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1400 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1402 * Target reset method =
1403 * SAS Hard Link Reset / SATA Link Reset
1405 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1406 tm->cm_targ->target_resets++;
1407 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1408 "Sending target reset to target %u\n", target->tid);
1409 tm->cm_complete = mpssas_target_reset_complete;
/* Target reset applies to all LUNs: freeze/prepare with the wildcard. */
1410 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1412 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1417 tm->cm_complete_data = (void *)tm;
/* Arm the TM watchdog; mpssas_tm_timeout escalates if no reply arrives. */
1419 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1420 mpssas_tm_timeout, tm);
1422 err = mps_map_command(sc, tm);
1424 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1425 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task management request.  Stops
 * the TM timeout callout, validates the reply, then either finishes abort
 * recovery for the target, continues with the next timed-out command, or
 * escalates to a logical unit reset.
 * NOTE(review): excerpt with elided lines -- 'targ' assignment, the
 * "if (cm == NULL)" test implied by the TAILQ_FIRST result, and
 * returns/braces are not visible in this listing.
 */
1433 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1435 struct mps_command *cm;
1436 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1437 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1438 struct mpssas_target *targ;
/* Cancel the mpssas_tm_timeout callout armed in mpssas_send_abort. */
1440 callout_stop(&tm->cm_callout);
1442 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1443 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1447 * Currently there should be no way we can hit this case. It only
1448 * happens when we have a failure to allocate chain frames, and
1449 * task management commands don't have S/G lists.
1451 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1452 mps_dprint(sc, MPS_RECOVERY,
1453 "cm_flags = %#x for abort %p TaskMID %u!\n",
1454 tm->cm_flags, tm, le16toh(req->TaskMID));
1455 mpssas_free_tm(sc, tm);
/* NULL reply: diag reset flushed the TM, or the reply was lost. */
1459 if (reply == NULL) {
1460 mps_dprint(sc, MPS_RECOVERY,
1461 "NULL abort reply for tm %p TaskMID %u\n",
1462 tm, le16toh(req->TaskMID));
1463 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1464 /* this completion was due to a reset, just cleanup */
1465 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1466 "reset, ignoring NULL abort reply\n");
1468 mpssas_free_tm(sc, tm);
1470 /* we should have gotten a reply. */
1471 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1472 "abort attempt, resetting controller\n");
1478 mps_dprint(sc, MPS_RECOVERY,
1479 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1480 le16toh(req->TaskMID),
1481 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1482 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command on this target, if any. */
1484 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1487 * If there are no more timedout commands, we're done with
1488 * error recovery for this target.
1490 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1491 "Finished abort recovery for target %u\n", targ->tid);
1494 mpssas_free_tm(sc, tm);
/* Head of the queue is a different SMID: the aborted command completed,
 * so move on to aborting the next timed-out command. */
1495 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1496 /* abort success, but we have more timedout commands to abort */
1497 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1498 "Continuing abort recovery for target %u\n", targ->tid);
1500 mpssas_send_abort(sc, tm, cm);
1502 /* we didn't get a command completion, so the abort
1503 * failed as far as we're concerned. escalate.
1505 mps_dprint(sc, MPS_RECOVERY,
1506 "Abort failed for target %u, sending logical unit reset\n",
1509 mpssas_send_reset(sc, tm,
1510 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT TASK TM before mpssas_tm_timeout fires. */
1514 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task management request for the timed-out
 * command 'cm', using the pre-allocated TM command 'tm'.  Arms the
 * MPS_ABORT_TIMEOUT callout and maps the TM to the hardware.
 * NOTE(review): excerpt -- the 'targ' assignment, 'err' declaration,
 * error-return paths, and closing brace are elided from this listing.
 */
1517 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1519 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1520 struct mpssas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort against. */
1524 if (targ->handle == 0) {
1525 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1526 "%s null devhandle for target_id %d\n",
1527 __func__, cm->cm_ccb->ccb_h.target_id);
1531 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1532 "Aborting command %p\n", cm);
1534 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1535 req->DevHandle = htole16(targ->handle);
1536 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1538 /* XXX Need to handle invalid LUNs */
1539 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the victim command by its SMID (little-endian). */
1541 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1544 tm->cm_complete = mpssas_abort_complete;
1545 tm->cm_complete_data = (void *)tm;
1546 tm->cm_targ = cm->cm_targ;
1547 tm->cm_lun = cm->cm_lun;
/* Arm the short abort watchdog; escalation happens in mpssas_tm_timeout. */
1549 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1550 mpssas_tm_timeout, tm);
1554 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1556 err = mps_map_command(sc, tm);
1558 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1559 "error %d sending abort for cm %p SMID %u\n",
/* NOTE(review): req->TaskMID was stored via htole16 above but is printed
 * here without le16toh -- wrong on big-endian hosts; verify upstream. */
1560 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Re-runs the interrupt handler in case the completion is merely pending,
 * then marks the command timed out, queues it on the target's
 * timedout_commands list, and starts (or joins) abort-based recovery.
 * NOTE(review): excerpt -- assignments of 'sc', 'targ', 'ccb', 'now', and
 * several returns/braces are elided from this listing.
 */
1565 mpssas_scsiio_timeout(void *data)
1567 sbintime_t elapsed, now;
1569 struct mps_softc *sc;
1570 struct mps_command *cm;
1571 struct mpssas_target *targ;
1573 cm = (struct mps_command *)data;
1579 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): the message says "cm %p" but the argument passed is 'sc',
 * so the wrong pointer is printed -- should presumably be 'cm'. */
1581 mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1584 * Run the interrupt handler to make sure it's not pending. This
1585 * isn't perfect because the command could have already completed
1586 * and been re-used, though this is unlikely.
1588 mps_intr_locked(sc);
/* mps_intr_locked() may have just completed this command. */
1589 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1590 mpssas_log_command(cm, MPS_XINFO,
1591 "SCSI command %p almost timed out\n", cm);
1595 if (cm->cm_ccb == NULL) {
1596 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
/* qos.sim_data was stamped with sbinuptime() when the I/O was queued. */
1603 elapsed = now - ccb->ccb_h.qos.sim_data;
1604 mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1605 "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1606 targ->tid, targ->handle, ccb->ccb_h.timeout,
1607 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1609 /* XXX first, check the firmware state, to see if it's still
1610 * operational. if not, do a diag reset.
1612 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1613 cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1614 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1616 if (targ->tm != NULL) {
1617 /* target already in recovery, just queue up another
1618 * timedout command to be processed later.
1620 mps_dprint(sc, MPS_RECOVERY,
1621 "queued timedout cm %p for processing by tm %p\n",
/* No recovery in flight: grab a TM frame and start aborting. */
1623 } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1624 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1625 "Sending abort to target %u for SMID %d\n", targ->tid,
1626 cm->cm_desc.Default.SMID);
1627 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1630 /* start recovery by aborting the first timedout command */
1631 mpssas_send_abort(sc, targ->tm, cm);
1633 /* XXX queue this target up for recovery once a TM becomes
1634 * available. The firmware only has a limited number of
1635 * HighPriority credits for the high priority requests used
1636 * for task management, and we ran out.
1638 * Isilon: don't worry about this for now, since we have
1639 * more credits than disks in an enclosure, and limit
1640 * ourselves to one TM per target for recovery.
1642 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1643 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into an
 * MPI2 SCSI_IO request and hand it to the hardware.  Validates the
 * target, handles shutdown/reset/removal states, fills in the request
 * frame (direction, tagging, LUN, CDB, optional EEDP/DIF fields), arms
 * the per-command timeout, and maps the command for DMA.
 * NOTE(review): excerpt -- assignments of 'sc' and 'csio', many
 * returns/braces, and several statements between the visible lines are
 * elided from this listing.
 */
1649 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1651 MPI2_SCSI_IO_REQUEST *req;
1652 struct ccb_scsiio *csio;
1653 struct mps_softc *sc;
1654 struct mpssas_target *targ;
1655 struct mpssas_lun *lun;
1656 struct mps_command *cm;
1657 uint8_t i, lba_byte, *ref_tag_addr;
1658 uint16_t eedp_flags;
1659 uint32_t mpi_control;
1663 mtx_assert(&sc->mps_mtx, MA_OWNED);
1666 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1667 ("Target %d out of bounds in XPT_SCSI_IO\n",
1668 csio->ccb_h.target_id));
1669 targ = &sassc->targets[csio->ccb_h.target_id];
1670 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the firmware has no device at this target id. */
1671 if (targ->handle == 0x0) {
1672 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1673 __func__, csio->ccb_h.target_id);
1674 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component disks are owned by the IR firmware, not the OS. */
1678 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1679 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1680 "supported %u\n", __func__, csio->ccb_h.target_id);
1681 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1686 * Sometimes, it is possible to get a command that is not "In
1687 * Progress" and was actually aborted by the upper layer. Check for
1688 * this here and complete the command without error.
1690 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1691 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1692 "target %u\n", __func__, csio->ccb_h.target_id);
1697 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1698 * that the volume has timed out. We want volumes to be enumerated
1699 * until they are deleted/removed, not just failed. In either event,
1700 * we're removing the target due to a firmware event telling us
1701 * the device is now gone (as opposed to some transient event). Since
1702 * we're opting to remove failed devices from the OS's view, we need
1703 * to propagate that status up the stack.
1705 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1706 if (targ->devinfo == 0)
1707 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1709 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Driver detach in progress: refuse new I/O. */
1714 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1715 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1716 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1722 * If target has a reset in progress, freeze the devq and return. The
1723 * devq will be released when the TM reset is finished.
1725 if (targ->flags & MPSSAS_TARGET_INRESET) {
1726 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1727 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1728 __func__, targ->tid);
1729 xpt_freeze_devq(ccb->ccb_h.path, 1);
1734 cm = mps_alloc_command(sc);
/* Out of command frames or mid diag-reset: freeze the SIM queue and ask
 * CAM to requeue the CCB later. */
1735 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1737 mps_free_command(sc, cm);
1739 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1740 xpt_freeze_simq(sassc->sim, 1);
1741 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1743 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1744 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request frame. */
1749 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1750 bzero(req, sizeof(*req));
1751 req->DevHandle = htole16(targ->handle);
1752 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1754 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1755 req->SenseBufferLength = MPS_SENSE_LEN;
1757 req->ChainOffset = 0;
1758 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1763 req->DataLength = htole32(csio->dxfer_len);
1764 req->BidirectionalDataLength = 0;
/* NOTE(review): IoFlags is set again identically after the CDB copy
 * below -- this first store is redundant (harmless duplicate). */
1765 req->IoFlags = htole16(csio->cdb_len);
1768 /* Note: BiDirectional transfers are not supported */
1769 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1771 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1772 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1775 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1776 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1780 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs carry 4 extra dwords beyond the base 16-byte CDB field. */
1784 if (csio->cdb_len == 32)
1785 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1787 * It looks like the hardware doesn't require an explicit tag
1788 * number for each transaction. SAM Task Management not supported
1791 switch (csio->tag_action) {
1792 case MSG_HEAD_OF_Q_TAG:
1793 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1795 case MSG_ORDERED_Q_TAG:
1796 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1799 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1801 case CAM_TAG_ACTION_NONE:
1802 case MSG_SIMPLE_Q_TAG:
1804 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Fold in any per-target Transport Layer Retries setting. */
1807 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1808 req->Control = htole32(mpi_control);
1809 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1810 mps_free_command(sc, cm);
1811 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever CAM put it (pointer vs. inline bytes). */
1816 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1817 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1819 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1820 req->IoFlags = htole16(csio->cdb_len);
1823 * Check if EEDP is supported and enabled. If it is then check if the
1824 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1825 * is formatted for EEDP support. If all of this is true, set CDB up
1826 * for EEDP transfer.
1828 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1829 if (sc->eedp_enabled && eedp_flags) {
1830 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1831 if (lun->lun_id == csio->ccb_h.target_lun) {
1836 if ((lun != NULL) && (lun->eedp_formatted)) {
1837 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1838 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1839 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1840 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1841 req->EEDPFlags = htole16(eedp_flags);
1844 * If CDB less than 32, fill in Primary Ref Tag with
1845 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1846 * already there. Also, set protection bit. FreeBSD
1847 * currently does not support CDBs bigger than 16, but
1848 * the code doesn't hurt, and will be here for the
1851 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 of a 16-byte CDB, byte 2 otherwise. */
1852 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1853 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1854 PrimaryReferenceTag;
1855 for (i = 0; i < 4; i++) {
1857 req->CDB.CDB32[lba_byte + i];
1860 req->CDB.EEDP32.PrimaryReferenceTag =
1861 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1862 req->CDB.EEDP32.PrimaryApplicationTagMask =
1864 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1868 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1869 req->EEDPFlags = htole16(eedp_flags);
1870 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Wire the command descriptor and completion callback. */
1876 cm->cm_length = csio->dxfer_len;
1877 if (cm->cm_length != 0) {
1879 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1883 cm->cm_sge = &req->SGL;
1884 cm->cm_sglsize = (32 - 24) * 4;
1885 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1886 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1887 cm->cm_complete = mpssas_scsiio_complete;
1888 cm->cm_complete_data = ccb;
1890 cm->cm_lun = csio->ccb_h.target_lun;
1894 * If HBA is a WD and the command is not for a retry, try to build a
1895 * direct I/O message. If failed, or the command is for a retry, send
1896 * the I/O to the IR volume itself.
1898 if (sc->WD_valid_config) {
1899 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1900 mpssas_direct_drive_io(sassc, cm, ccb);
1902 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1906 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1907 if (csio->bio != NULL)
1908 biotrack(csio->bio, __func__);
/* Stamp the submit time (read back in mpssas_scsiio_timeout) and arm the
 * per-command timeout in the CCB's millisecond units. */
1910 csio->ccb_h.qos.sim_data = sbinuptime();
1911 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1912 mpssas_scsiio_timeout, cm, 0);
1915 targ->outstanding++;
1916 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1917 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1919 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1920 __func__, cm, ccb, targ->outstanding);
1922 mps_map_command(sc, cm);
/*
 * Debug helper: decode and print the interesting fields of a failed
 * SCSI_IO reply (IOC status, SCSI status/state, autosense data, and any
 * response-info code) at MPS_XINFO verbosity.
 * NOTE(review): excerpt -- declarations of 'response_info'/
 * 'response_bytes', some closing braces, and the early-return branch for
 * log_info 0x31170000 are elided from this listing.
 */
1927 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request
1930 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1931 Mpi2SCSIIOReply_t *mpi_reply)
1935 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1936 MPI2_IOCSTATUS_MASK;
1937 u8 scsi_state = mpi_reply->SCSIState;
1938 u8 scsi_status = mpi_reply->SCSIStatus;
1939 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1940 const char *desc_ioc_state, *desc_scsi_status;
/* 0x31170000 is a chatty firmware loginfo; handled specially (elided). */
1942 if (log_info == 0x31170000)
/* Map the numeric statuses to human-readable names for the log. */
1945 desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1947 desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1950 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1951 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1954 *We can add more detail about underflow data here
/* "%b" decodes scsi_state bit-by-bit using the bit-name string below. */
1957 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1958 "scsi_state %b\n", desc_scsi_status, scsi_status,
1959 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1960 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump the autosense buffer only when XINFO debugging is enabled. */
1962 if (sc->mps_debug & MPS_XINFO &&
1963 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1964 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1965 scsi_sense_print(csio);
1966 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1969 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1970 response_info = le32toh(mpi_reply->ResponseInfo);
1971 response_bytes = (u8 *)&response_info;
1972 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1974 mps_describe_table(mps_scsi_taskmgmt_string,
1975 response_bytes[0]));
1980 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1982 MPI2_SCSI_IO_REPLY *rep;
1984 struct ccb_scsiio *csio;
1985 struct mpssas_softc *sassc;
1986 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1987 u8 *TLR_bits, TLR_on;
1990 struct mpssas_target *target;
1991 target_id_t target_id;
1994 mps_dprint(sc, MPS_TRACE,
1995 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
1996 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1997 cm->cm_targ->outstanding);
1999 callout_stop(&cm->cm_callout);
2000 mtx_assert(&sc->mps_mtx, MA_OWNED);
2003 ccb = cm->cm_complete_data;
2005 target_id = csio->ccb_h.target_id;
2006 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2008 * XXX KDM if the chain allocation fails, does it matter if we do
2009 * the sync and unload here? It is simpler to do it in every case,
2010 * assuming it doesn't cause problems.
2012 if (cm->cm_data != NULL) {
2013 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2014 dir = BUS_DMASYNC_POSTREAD;
2015 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2016 dir = BUS_DMASYNC_POSTWRITE;
2017 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2018 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2021 cm->cm_targ->completed++;
2022 cm->cm_targ->outstanding--;
2023 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2024 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2026 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2027 if (ccb->csio.bio != NULL)
2028 biotrack(ccb->csio.bio, __func__);
2031 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2032 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2033 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2034 ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2035 cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2036 if (cm->cm_reply != NULL)
2037 mpssas_log_command(cm, MPS_RECOVERY,
2038 "completed timedout cm %p ccb %p during recovery "
2039 "ioc %x scsi %x state %x xfer %u\n",
2040 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2041 rep->SCSIStatus, rep->SCSIState,
2042 le32toh(rep->TransferCount));
2044 mpssas_log_command(cm, MPS_RECOVERY,
2045 "completed timedout cm %p ccb %p during recovery\n",
2047 } else if (cm->cm_targ->tm != NULL) {
2048 if (cm->cm_reply != NULL)
2049 mpssas_log_command(cm, MPS_RECOVERY,
2050 "completed cm %p ccb %p during recovery "
2051 "ioc %x scsi %x state %x xfer %u\n",
2052 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2053 rep->SCSIStatus, rep->SCSIState,
2054 le32toh(rep->TransferCount));
2056 mpssas_log_command(cm, MPS_RECOVERY,
2057 "completed cm %p ccb %p during recovery\n",
2059 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2060 mpssas_log_command(cm, MPS_RECOVERY,
2061 "reset completed cm %p ccb %p\n",
2065 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2067 * We ran into an error after we tried to map the command,
2068 * so we're getting a callback without queueing the command
2069 * to the hardware. So we set the status here, and it will
2070 * be retained below. We'll go through the "fast path",
2071 * because there can be no reply when we haven't actually
2072 * gone out to the hardware.
2074 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2077 * Currently the only error included in the mask is
2078 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2079 * chain frames. We need to freeze the queue until we get
2080 * a command that completed without this error, which will
2081 * hopefully have some chain frames attached that we can
2082 * use. If we wanted to get smarter about it, we would
2083 * only unfreeze the queue in this condition when we're
2084 * sure that we're getting some chain frames back. That's
2085 * probably unnecessary.
2087 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2088 xpt_freeze_simq(sassc->sim, 1);
2089 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2090 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2091 "freezing SIM queue\n");
2096 * If this is a Start Stop Unit command and it was issued by the driver
2097 * during shutdown, decrement the refcount to account for all of the
2098 * commands that were sent. All SSU commands should be completed before
2099 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2102 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2103 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2107 /* Take the fast path to completion */
2108 if (cm->cm_reply == NULL) {
2109 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2110 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2111 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2113 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2114 ccb->csio.scsi_status = SCSI_STATUS_OK;
2116 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2117 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2118 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2119 mps_dprint(sc, MPS_XINFO,
2120 "Unfreezing SIM queue\n");
2125 * There are two scenarios where the status won't be
2126 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2127 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2129 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2131 * Freeze the dev queue so that commands are
2132 * executed in the correct order after error
2135 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2136 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2138 mps_free_command(sc, cm);
2143 mpssas_log_command(cm, MPS_XINFO,
2144 "ioc %x scsi %x state %x xfer %u\n",
2145 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2146 le32toh(rep->TransferCount));
2149 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2150 * Volume if an error occurred (normal I/O retry). Use the original
2151 * CCB, but set a flag that this will be a retry so that it's sent to
2152 * the original volume. Free the command but reuse the CCB.
2154 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2155 mps_free_command(sc, cm);
2156 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2157 mpssas_action_scsiio(sassc, ccb);
2160 ccb->ccb_h.sim_priv.entries[0].field = 0;
2162 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2163 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2164 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2166 case MPI2_IOCSTATUS_SUCCESS:
2167 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2169 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2170 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2171 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2173 /* Completion failed at the transport level. */
2174 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2175 MPI2_SCSI_STATE_TERMINATED)) {
2176 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2180 /* In a modern packetized environment, an autosense failure
2181 * implies that there's not much else that can be done to
2182 * recover the command.
2184 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2185 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2190 * CAM doesn't care about SAS Response Info data, but if this is
2191 * the state check if TLR should be done. If not, clear the
2192 * TLR_bits for the target.
2194 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2195 ((le32toh(rep->ResponseInfo) &
2196 MPI2_SCSI_RI_MASK_REASONCODE) ==
2197 MPS_SCSI_RI_INVALID_FRAME)) {
2198 sc->mapping_table[target_id].TLR_bits =
2199 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2203 * Intentionally override the normal SCSI status reporting
2204 * for these two cases. These are likely to happen in a
2205 * multi-initiator environment, and we want to make sure that
2206 * CAM retries these commands rather than fail them.
2208 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2209 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2210 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2214 /* Handle normal status and sense */
2215 csio->scsi_status = rep->SCSIStatus;
2216 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2217 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2219 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2221 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2222 int sense_len, returned_sense_len;
2224 returned_sense_len = min(le32toh(rep->SenseCount),
2225 sizeof(struct scsi_sense_data));
2226 if (returned_sense_len < ccb->csio.sense_len)
2227 ccb->csio.sense_resid = ccb->csio.sense_len -
2230 ccb->csio.sense_resid = 0;
2232 sense_len = min(returned_sense_len,
2233 ccb->csio.sense_len - ccb->csio.sense_resid);
2234 bzero(&ccb->csio.sense_data,
2235 sizeof(ccb->csio.sense_data));
2236 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2237 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2241 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2242 * and it's page code 0 (Supported Page List), and there is
2243 * inquiry data, and this is for a sequential access device, and
2244 * the device is an SSP target, and TLR is supported by the
2245 * controller, turn the TLR_bits value ON if page 0x90 is
2248 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2249 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2250 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2251 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2252 (csio->data_ptr != NULL) &&
2253 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2254 (sc->control_TLR) &&
2255 (sc->mapping_table[target_id].device_info &
2256 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2257 vpd_list = (struct scsi_vpd_supported_page_list *)
2259 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2260 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2261 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2262 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2263 csio->cdb_io.cdb_bytes[4];
2264 alloc_len -= csio->resid;
2265 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2266 if (vpd_list->list[i] == 0x90) {
2274 * If this is a SATA direct-access end device, mark it so that
2275 * a SCSI StartStopUnit command will be sent to it when the
2276 * driver is being shutdown.
2278 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2279 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2280 (sc->mapping_table[target_id].device_info &
2281 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2282 ((sc->mapping_table[target_id].device_info &
2283 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2284 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2285 target = &sassc->targets[target_id];
2286 target->supports_SSU = TRUE;
2287 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2291 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2292 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2294 * If devinfo is 0 this will be a volume. In that case don't
2295 * tell CAM that the volume is not there. We want volumes to
2296 * be enumerated until they are deleted/removed, not just
2299 if (cm->cm_targ->devinfo == 0)
2300 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2302 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2304 case MPI2_IOCSTATUS_INVALID_SGL:
2305 mps_print_scsiio_cmd(sc, cm);
2306 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2308 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2310 * This is one of the responses that comes back when an I/O
2311 * has been aborted. If it is because of a timeout that we
2312 * initiated, just set the status to CAM_CMD_TIMEOUT.
2313 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2314 * command is the same (it gets retried, subject to the
2315 * retry counter), the only difference is what gets printed
2318 if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2319 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2321 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2323 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2324 /* resid is ignored for this condition */
2326 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2328 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2329 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2331 * These can sometimes be transient transport-related
2332 * errors, and sometimes persistent drive-related errors.
2333 * We used to retry these without decrementing the retry
2334 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2335 * we hit a persistent drive problem that returns one of
2336 * these error codes, we would retry indefinitely. So,
2337 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2338 * count and avoid infinite retries. We're taking the
2339 * potential risk of flagging false failures in the event
2340 * of a topology-related error (e.g. a SAS expander problem
2341 * causes a command addressed to a drive to fail), but
2342 * avoiding getting into an infinite retry loop. However,
2343 * if we get them while were moving a device, we should
2344 * fail the request as 'not there' because the device
2345 * is effectively gone.
2347 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2348 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2350 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2351 mps_dprint(sc, MPS_INFO,
2352 "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2353 mps_describe_table(mps_iocstatus_string,
2354 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2355 target_id, cm->cm_desc.Default.SMID,
2356 le32toh(rep->IOCLogInfo),
2357 (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2358 mps_dprint(sc, MPS_XINFO,
2359 "SCSIStatus %x SCSIState %x xfercount %u\n",
2360 rep->SCSIStatus, rep->SCSIState,
2361 le32toh(rep->TransferCount));
2363 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2364 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2365 case MPI2_IOCSTATUS_INVALID_VPID:
2366 case MPI2_IOCSTATUS_INVALID_FIELD:
2367 case MPI2_IOCSTATUS_INVALID_STATE:
2368 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2369 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2370 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2371 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2372 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2374 mpssas_log_command(cm, MPS_XINFO,
2375 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2376 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2377 rep->SCSIStatus, rep->SCSIState,
2378 le32toh(rep->TransferCount));
2379 csio->resid = cm->cm_length;
2380 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2384 mps_sc_failed_io_info(sc,csio,rep);
2386 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2387 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2388 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2389 mps_dprint(sc, MPS_XINFO, "Command completed, "
2390 "unfreezing SIM queue\n");
2393 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2394 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2395 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2399 * Check to see if we're removing the device. If so, and this is the
2400 * last command on the queue, proceed with the deferred removal of the
2401 * device. Note, for removing a volume, this won't trigger because
2402 * pending_remove_tm will be NULL.
2404 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2405 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2406 cm->cm_targ->pending_remove_tm != NULL) {
2407 mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
2408 mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2409 cm->cm_targ->pending_remove_tm = NULL;
2413 mps_free_command(sc, cm);
/*
 * mpssas_direct_drive_io: for WarpDrive direct-drive volumes, try to convert
 * a READ/WRITE (6/10/12/16-byte CDB) addressed to the IR volume into a
 * direct I/O to the underlying PhysDisk.  The virtual LBA from the CDB is
 * translated to a physical LBA, the request's DevHandle is rewritten to the
 * PhysDisk's handle (via sc->DD_column_map), the LBA bytes in the CDB are
 * patched in place, and MPS_CM_FLAGS_DD_IO is set.  I/Os that cross a stripe
 * boundary, or exceed the volume's max LBA, are left untouched and go to the
 * IR volume.
 *
 * NOTE(review): this extract is missing interleaved lines (braces, the
 * `uint64_t virtLBA;` declaration, `else` arms, comment openers) dropped by
 * extraction, and every line carries a stray leading line-number token —
 * reconcile against the full mps_sas.c before building.
 */
2417 /* All Request reached here are Endian safe */
2419 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2421 pMpi2SCSIIORequest_t pIO_req;
2422 struct mps_softc *sc = sassc->sc;
2424 uint32_t physLBA, stripe_offset, stripe_unit;
2425 uint32_t io_size, column;
2426 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2429 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2430 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2431 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2432 * bit different than the 10/16 CDBs, handle them separately.
2434 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2435 CDB = pIO_req->CDB.CDB32;
2438 * Handle 6 byte CDBs.
2440 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2441 (CDB[0] == WRITE_6))) {
2443 * Get the transfer size in blocks.
2445 io_size = (cm->cm_length >> sc->DD_block_exponent);
2448 * Get virtual LBA given in the CDB.
/* 6-byte CDB: 21-bit LBA in bits 4:0 of byte 1 plus bytes 2-3. */
2450 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2451 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2454 * Check that LBA range for I/O does not exceed volume's
2457 if ((virtLBA + (uint64_t)io_size - 1) <=
2460 * Check if the I/O crosses a stripe boundary. If not,
2461 * translate the virtual LBA to a physical LBA and set
2462 * the DevHandle for the PhysDisk to be used. If it
2463 * does cross a boundary, do normal I/O. To get the
2464 * right DevHandle to use, get the map number for the
2465 * column, then use that map number to look up the
2466 * DevHandle of the PhysDisk.
2468 stripe_offset = (uint32_t)virtLBA &
2469 (sc->DD_stripe_size - 1);
2470 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2471 physLBA = (uint32_t)virtLBA >>
2472 sc->DD_stripe_exponent;
2473 stripe_unit = physLBA / sc->DD_num_phys_disks;
2474 column = physLBA % sc->DD_num_phys_disks;
2475 pIO_req->DevHandle =
2476 htole16(sc->DD_column_map[column].dev_handle);
2477 /* ???? Is this endian safe*/
2478 cm->cm_desc.SCSIIO.DevHandle =
/* Patch the 21-bit physical LBA back into CDB bytes 1-3. */
2481 physLBA = (stripe_unit <<
2482 sc->DD_stripe_exponent) + stripe_offset;
2483 ptrLBA = &pIO_req->CDB.CDB32[1];
2484 physLBA_byte = (uint8_t)(physLBA >> 16);
2485 *ptrLBA = physLBA_byte;
2486 ptrLBA = &pIO_req->CDB.CDB32[2];
2487 physLBA_byte = (uint8_t)(physLBA >> 8);
2488 *ptrLBA = physLBA_byte;
2489 ptrLBA = &pIO_req->CDB.CDB32[3];
2490 physLBA_byte = (uint8_t)physLBA;
2491 *ptrLBA = physLBA_byte;
2494 * Set flag that Direct Drive I/O is
2497 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2504 * Handle 10, 12 or 16 byte CDBs.
2506 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2507 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2508 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2509 (CDB[0] == WRITE_12))) {
2511 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2512 * are 0. If not, this is accessing beyond 2TB so handle it in
2513 * the else section. 10-byte and 12-byte CDB's are OK.
2514 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2515 * ready to accept 12byte CDB for Direct IOs.
2517 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2518 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2519 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2521 * Get the transfer size in blocks.
2523 io_size = (cm->cm_length >> sc->DD_block_exponent);
2526 * Get virtual LBA. Point to correct lower 4 bytes of
2527 * LBA in the CDB depending on command.
2529 lba_idx = ((CDB[0] == READ_12) ||
2530 (CDB[0] == WRITE_12) ||
2531 (CDB[0] == READ_10) ||
2532 (CDB[0] == WRITE_10))? 2 : 6;
2533 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2534 ((uint64_t)CDB[lba_idx + 1] << 16) |
2535 ((uint64_t)CDB[lba_idx + 2] << 8) |
2536 (uint64_t)CDB[lba_idx + 3];
2539 * Check that LBA range for I/O does not exceed volume's
2542 if ((virtLBA + (uint64_t)io_size - 1) <=
2545 * Check if the I/O crosses a stripe boundary.
2546 * If not, translate the virtual LBA to a
2547 * physical LBA and set the DevHandle for the
2548 * PhysDisk to be used. If it does cross a
2549 * boundary, do normal I/O. To get the right
2550 * DevHandle to use, get the map number for the
2551 * column, then use that map number to look up
2552 * the DevHandle of the PhysDisk.
2554 stripe_offset = (uint32_t)virtLBA &
2555 (sc->DD_stripe_size - 1);
2556 if ((stripe_offset + io_size) <=
2557 sc->DD_stripe_size) {
2558 physLBA = (uint32_t)virtLBA >>
2559 sc->DD_stripe_exponent;
2560 stripe_unit = physLBA /
2561 sc->DD_num_phys_disks;
2563 sc->DD_num_phys_disks;
2564 pIO_req->DevHandle =
2565 htole16(sc->DD_column_map[column].
2567 cm->cm_desc.SCSIIO.DevHandle =
2570 physLBA = (stripe_unit <<
2571 sc->DD_stripe_exponent) +
2574 &pIO_req->CDB.CDB32[lba_idx];
2575 physLBA_byte = (uint8_t)(physLBA >> 24);
2576 *ptrLBA = physLBA_byte;
2578 &pIO_req->CDB.CDB32[lba_idx + 1];
2579 physLBA_byte = (uint8_t)(physLBA >> 16);
2580 *ptrLBA = physLBA_byte;
2582 &pIO_req->CDB.CDB32[lba_idx + 2];
2583 physLBA_byte = (uint8_t)(physLBA >> 8);
2584 *ptrLBA = physLBA_byte;
2586 &pIO_req->CDB.CDB32[lba_idx + 3];
2587 physLBA_byte = (uint8_t)physLBA;
2588 *ptrLBA = physLBA_byte;
2591 * Set flag that Direct Drive I/O is
2594 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2599 * 16-byte CDB and the upper 4 bytes of the CDB are not
2600 * 0. Get the transfer size in blocks.
2602 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * NOTE(review): CDB[2] is the most-significant LBA byte of a
 * 16-byte CDB, so the shift below should almost certainly be
 * 56, not 54 — looks like a long-standing typo; confirm against
 * the SBC READ(16) CDB layout before changing.
 */
2607 virtLBA = ((uint64_t)CDB[2] << 54) |
2608 ((uint64_t)CDB[3] << 48) |
2609 ((uint64_t)CDB[4] << 40) |
2610 ((uint64_t)CDB[5] << 32) |
2611 ((uint64_t)CDB[6] << 24) |
2612 ((uint64_t)CDB[7] << 16) |
2613 ((uint64_t)CDB[8] << 8) |
2617 * Check that LBA range for I/O does not exceed volume's
2620 if ((virtLBA + (uint64_t)io_size - 1) <=
2623 * Check if the I/O crosses a stripe boundary.
2624 * If not, translate the virtual LBA to a
2625 * physical LBA and set the DevHandle for the
2626 * PhysDisk to be used. If it does cross a
2627 * boundary, do normal I/O. To get the right
2628 * DevHandle to use, get the map number for the
2629 * column, then use that map number to look up
2630 * the DevHandle of the PhysDisk.
2632 stripe_offset = (uint32_t)virtLBA &
2633 (sc->DD_stripe_size - 1);
2634 if ((stripe_offset + io_size) <=
2635 sc->DD_stripe_size) {
2636 physLBA = (uint32_t)(virtLBA >>
2637 sc->DD_stripe_exponent);
2638 stripe_unit = physLBA /
2639 sc->DD_num_phys_disks;
2641 sc->DD_num_phys_disks;
2642 pIO_req->DevHandle =
2643 htole16(sc->DD_column_map[column].
2645 cm->cm_desc.SCSIIO.DevHandle =
2648 physLBA = (stripe_unit <<
2649 sc->DD_stripe_exponent) +
2653 * Set upper 4 bytes of LBA to 0. We
2654 * assume that the phys disks are less
2655 * than 2 TB's in size. Then, set the
2658 pIO_req->CDB.CDB32[2] = 0;
2659 pIO_req->CDB.CDB32[3] = 0;
2660 pIO_req->CDB.CDB32[4] = 0;
2661 pIO_req->CDB.CDB32[5] = 0;
2662 ptrLBA = &pIO_req->CDB.CDB32[6];
2663 physLBA_byte = (uint8_t)(physLBA >> 24);
2664 *ptrLBA = physLBA_byte;
2665 ptrLBA = &pIO_req->CDB.CDB32[7];
2666 physLBA_byte = (uint8_t)(physLBA >> 16);
2667 *ptrLBA = physLBA_byte;
2668 ptrLBA = &pIO_req->CDB.CDB32[8];
2669 physLBA_byte = (uint8_t)(physLBA >> 8);
2670 *ptrLBA = physLBA_byte;
2671 ptrLBA = &pIO_req->CDB.CDB32[9];
2672 physLBA_byte = (uint8_t)physLBA;
2673 *ptrLBA = physLBA_byte;
2676 * Set flag that Direct Drive I/O is
2679 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
/*
 * Completion handler for an SMP passthrough command.  Translates the
 * IOCStatus/SASStatus from the reply (and the SMP function result byte in
 * the response buffer) into a CAM CCB status, syncs and unloads the DMA map
 * for the bidirectional request/response buffers, and frees the command.
 *
 * NOTE(review): extraction dropped interleaved lines here (braces, `goto`
 * targets such as the bailout label, `else`) and each line carries a stray
 * leading number — compare with the full mps_sas.c.
 */
2687 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2689 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2690 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2694 ccb = cm->cm_complete_data;
2697 * Currently there should be no way we can hit this case. It only
2698 * happens when we have a failure to allocate chain frames, and SMP
2699 * commands require two S/G elements only. That should be handled
2700 * in the standard request size.
2702 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2703 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2704 __func__, cm->cm_flags);
2705 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2709 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2711 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2712 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2716 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2717 sasaddr = le32toh(req->SASAddress.Low);
2718 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2720 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2721 MPI2_IOCSTATUS_SUCCESS ||
2722 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2723 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2724 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2725 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2729 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2730 "%#jx completed successfully\n", __func__,
2731 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2733 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2734 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
2736 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2740 * We sync in both directions because we had DMAs in the S/G list
2741 * in both directions.
2743 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2744 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2745 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2746 mps_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request to `sasaddr` on behalf of an
 * XPT_SMP_IO CCB.  Resolves virtual request/response buffer addresses
 * (rejecting physical-address CCBs), allocates a command, fills in the
 * MPI2_SMP_PASSTHROUGH_REQUEST, sets up a 2-element uio so one
 * mps_map_command() maps both buffers, and queues the command with
 * mpssas_smpio_complete as the completion handler.
 *
 * NOTE(review): extraction dropped interleaved lines (braces, `sc = sassc->sc;`,
 * the CAM_DATA_SG case label, `xpt_done()` calls, `return`s) and each line
 * carries a stray leading number — compare with the full mps_sas.c.
 */
2751 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2753 struct mps_command *cm;
2754 uint8_t *request, *response;
2755 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2756 struct mps_softc *sc;
2763 * XXX We don't yet support physical addresses here.
2765 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2766 case CAM_DATA_PADDR:
2767 case CAM_DATA_SG_PADDR:
2768 mps_dprint(sc, MPS_ERROR,
2769 "%s: physical addresses not supported\n", __func__);
2770 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2775 * The chip does not support more than one buffer for the
2776 * request or response.
2778 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2779 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2780 mps_dprint(sc, MPS_ERROR,
2781 "%s: multiple request or response "
2782 "buffer segments not supported for SMP\n",
2784 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2790 * The CAM_SCATTER_VALID flag was originally implemented
2791 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2792 * We have two. So, just take that flag to mean that we
2793 * might have S/G lists, and look at the S/G segment count
2794 * to figure out whether that is the case for each individual
2797 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2798 bus_dma_segment_t *req_sg;
2800 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2801 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2803 request = ccb->smpio.smp_request;
2805 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2806 bus_dma_segment_t *rsp_sg;
2808 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2809 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2811 response = ccb->smpio.smp_response;
2813 case CAM_DATA_VADDR:
2814 request = ccb->smpio.smp_request;
2815 response = ccb->smpio.smp_response;
2818 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2823 cm = mps_alloc_command(sc);
2825 mps_dprint(sc, MPS_ERROR,
2826 "%s: cannot allocate command\n", __func__);
2827 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2832 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2833 bzero(req, sizeof(*req));
2834 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2836 /* Allow the chip to use any route to this SAS address. */
2837 req->PhysicalPort = 0xff;
2839 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2841 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2843 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2844 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2846 mpi_init_sge(cm, req, &req->SGL);
2849 * Set up a uio to pass into mps_map_command(). This allows us to
2850 * do one map command, and one busdma call in there.
2852 cm->cm_uio.uio_iov = cm->cm_iovec;
2853 cm->cm_uio.uio_iovcnt = 2;
2854 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2857 * The read/write flag isn't used by busdma, but set it just in
2858 * case. This isn't exactly accurate, either, since we're going in
2861 cm->cm_uio.uio_rw = UIO_WRITE;
2863 cm->cm_iovec[0].iov_base = request;
2864 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2865 cm->cm_iovec[1].iov_base = response;
2866 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2868 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2869 cm->cm_iovec[1].iov_len;
2872 * Trigger a warning message in mps_data_cb() for the user if we
2873 * wind up exceeding two S/G segments. The chip expects one
2874 * segment for the request and another for the response.
2876 cm->cm_max_segs = 2;
2878 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2879 cm->cm_complete = mpssas_smpio_complete;
2880 cm->cm_complete_data = ccb;
2883 * Tell the mapping code that we're using a uio, and that this is
2884 * an SMP passthrough request. There is a little special-case
2885 * logic there (in mps_data_cb()) to handle the bidirectional
2888 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2889 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2891 /* The chip data format is little endian. */
2892 req->SASAddress.High = htole32(sasaddr >> 32);
2893 req->SASAddress.Low = htole32(sasaddr);
2896 * XXX Note that we don't have a timeout/abort mechanism here.
2897 * From the manual, it looks like task management requests only
2898 * work for SCSI IO and SATA passthrough requests. We may need to
2899 * have a mechanism to retry requests in the event of a chip reset
2900 * at least. Hopefully the chip will insure that any errors short
2901 * of that are relayed back to the driver.
2903 error = mps_map_command(sc, cm);
2904 if ((error != 0) && (error != EINPROGRESS)) {
2905 mps_dprint(sc, MPS_ERROR,
2906 "%s: error %d returned from mps_map_command()\n",
/* Error unwind: release the command and fail the CCB. */
2914 mps_free_command(sc, cm);
2915 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address to route the SMP
 * request to.  If the target itself is an SMP target (an expander or an
 * STP/SMP bridge), use its own address; otherwise fall back to the parent
 * device's SAS address (the expander the device hangs off), then hand the
 * CCB to mpssas_send_smpcmd().  Fails the CCB with CAM_DEV_NOT_THERE when
 * no usable SAS address can be found.
 *
 * NOTE(review): extraction dropped interleaved lines (braces, `sc = sassc->sc;`,
 * `xpt_done()`/`return`, `#endif`/`else` pairing around OLD_MPS_PROBE) and
 * each line carries a stray leading number — compare with the full mps_sas.c.
 */
2922 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2924 struct mps_softc *sc;
2925 struct mpssas_target *targ;
2926 uint64_t sasaddr = 0;
2931 * Make sure the target exists.
2933 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2934 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2935 targ = &sassc->targets[ccb->ccb_h.target_id];
2936 if (targ->handle == 0x0) {
2937 mps_dprint(sc, MPS_ERROR,
2938 "%s: target %d does not exist!\n", __func__,
2939 ccb->ccb_h.target_id);
2940 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2946 * If this device has an embedded SMP target, we'll talk to it
2948 * figure out what the expander's address is.
2950 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2951 sasaddr = targ->sasaddr;
2954 * If we don't have a SAS address for the expander yet, try
2955 * grabbing it from the page 0x83 information cached in the
2956 * transport layer for this target. LSI expanders report the
2957 * expander SAS address as the port-associated SAS address in
2958 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2961 * XXX KDM disable this for now, but leave it commented out so that
2962 * it is obvious that this is another possible way to get the SAS
2965 * The parent handle method below is a little more reliable, and
2966 * the other benefit is that it works for devices other than SES
2967 * devices. So you can send a SMP request to a da(4) device and it
2968 * will get routed to the expander that device is attached to.
2969 * (Assuming the da(4) device doesn't contain an SMP target...)
2973 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2977 * If we still don't have a SAS address for the expander, look for
2978 * the parent device of this device, which is probably the expander.
2981 #ifdef OLD_MPS_PROBE
2982 struct mpssas_target *parent_target;
2985 if (targ->parent_handle == 0x0) {
2986 mps_dprint(sc, MPS_ERROR,
2987 "%s: handle %d does not have a valid "
2988 "parent handle!\n", __func__, targ->handle);
2989 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2992 #ifdef OLD_MPS_PROBE
2993 parent_target = mpssas_find_target_by_handle(sassc, 0,
2994 targ->parent_handle);
2996 if (parent_target == NULL) {
2997 mps_dprint(sc, MPS_ERROR,
2998 "%s: handle %d does not have a valid "
2999 "parent target!\n", __func__, targ->handle);
3000 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3004 if ((parent_target->devinfo &
3005 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3006 mps_dprint(sc, MPS_ERROR,
3007 "%s: handle %d parent %d does not "
3008 "have an SMP target!\n", __func__,
3009 targ->handle, parent_target->handle);
3010 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3015 sasaddr = parent_target->sasaddr;
3016 #else /* OLD_MPS_PROBE */
3017 if ((targ->parent_devinfo &
3018 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3019 mps_dprint(sc, MPS_ERROR,
3020 "%s: handle %d parent %d does not "
3021 "have an SMP target!\n", __func__,
3022 targ->handle, targ->parent_handle);
3023 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3027 if (targ->parent_sasaddr == 0x0) {
3028 mps_dprint(sc, MPS_ERROR,
3029 "%s: handle %d parent handle %d does "
3030 "not have a valid SAS address!\n",
3031 __func__, targ->handle, targ->parent_handle);
3032 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3036 sasaddr = targ->parent_sasaddr;
3037 #endif /* OLD_MPS_PROBE */
/* Still no address: fail the CCB rather than send a misrouted SMP frame. */
3042 mps_dprint(sc, MPS_INFO,
3043 "%s: unable to find SAS address for handle %d\n",
3044 __func__, targ->handle);
3045 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3048 mpssas_send_smpcmd(sassc, ccb, sasaddr);
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command, build a
 * Target Reset (hard link reset) for the target's DevHandle, mark the target
 * in-reset via mpssas_prepare_for_tm(), and queue the TM.  Completion is
 * handled by mpssas_resetdev_complete().
 *
 * NOTE(review): extraction dropped interleaved lines here (braces,
 * `sc = sassc->sc;`, the NULL-check on `tm`, `xpt_done()`/`return`) and each
 * line carries a stray leading number — compare with the full mps_sas.c.
 */
3058 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3060 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3061 struct mps_softc *sc;
3062 struct mps_command *tm;
3063 struct mpssas_target *targ;
3065 MPS_FUNCTRACE(sassc->sc);
3066 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3068 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3069 ("Target %d out of bounds in XPT_RESET_DEV\n",
3070 ccb->ccb_h.target_id));
3072 tm = mpssas_alloc_tm(sc);
3074 mps_dprint(sc, MPS_ERROR,
3075 "command alloc failure in mpssas_action_resetdev\n");
3076 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3081 targ = &sassc->targets[ccb->ccb_h.target_id];
3082 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3083 req->DevHandle = htole16(targ->handle);
3084 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3086 /* SAS Hard Link Reset / SATA Link Reset */
3087 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3090 tm->cm_complete = mpssas_resetdev_complete;
3091 tm->cm_complete_data = ccb;
3094 mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3095 mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset TM issued by
 * mpssas_action_resetdev().  Maps the TM response code to a CAM status on
 * the originating CCB, announces AC_SENT_BDR on success, and releases the
 * TM command.
 *
 * NOTE(review): extraction dropped interleaved lines here (braces, the
 * NULL-reply check, `goto bailout`, `xpt_done()`) and each line carries a
 * stray leading number — compare with the full mps_sas.c.
 */
3099 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3101 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3105 mtx_assert(&sc->mps_mtx, MA_OWNED);
3107 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3108 ccb = tm->cm_complete_data;
3111 * Currently there should be no way we can hit this case. It only
3112 * happens when we have a failure to allocate chain frames, and
3113 * task management commands don't have S/G lists.
3115 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3116 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3118 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3120 mps_dprint(sc, MPS_ERROR,
3121 "%s: cm_flags = %#x for reset of handle %#04x! "
3122 "This should not happen!\n", __func__, tm->cm_flags,
3124 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3128 mps_dprint(sc, MPS_XINFO,
3129 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3130 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/*
 * NOTE(review): ResponseCode is a single byte in the MPI2 task-management
 * reply layout, so le32toh() on it looks suspicious (harmless on
 * little-endian hosts) — confirm against mpi2_init.h.
 */
3132 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3133 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3134 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3138 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3142 mpssas_free_tm(sc, tm);
3147 mpssas_poll(struct cam_sim *sim)
3149 struct mpssas_softc *sassc;
3151 sassc = cam_sim_softc(sim);
3153 if (sassc->sc->mps_debug & MPS_TRACE) {
3154 /* frequent debug messages during a panic just slow
3155 * everything down too much.
3157 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3158 sassc->sc->mps_debug &= ~MPS_TRACE;
3161 mps_intr_locked(sassc->sc);
/*
 * CAM async event callback.  For AC_ADVINFO_CHANGED with buftype
 * CDAI_TYPE_RCAPLONG it fetches the long READ CAPACITY data via an
 * XPT_DEV_ADVINFO CCB and records per-LUN EEDP (protection) state —
 * eedp_formatted and eedp_block_size — allocating a mpssas_lun entry for the
 * LUN if one does not exist yet.
 *
 * NOTE(review): extraction dropped interleaved lines here (braces, the
 * `sassc = sc->sassc;` assignment, `lun_id_t lunid;`/`int found_lun;`
 * declarations, `break`s, the protection-type case labels) and each line
 * carries a stray leading number — compare with the full mps_sas.c.
 */
3165 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3168 struct mps_softc *sc;
3170 sc = (struct mps_softc *)callback_arg;
3173 case AC_ADVINFO_CHANGED: {
3174 struct mpssas_target *target;
3175 struct mpssas_softc *sassc;
3176 struct scsi_read_capacity_data_long rcap_buf;
3177 struct ccb_dev_advinfo cdai;
3178 struct mpssas_lun *lun;
3183 buftype = (uintptr_t)arg;
3189 * We're only interested in read capacity data changes.
3191 if (buftype != CDAI_TYPE_RCAPLONG)
3195 * We should have a handle for this, but check to make sure.
3197 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3198 ("Target %d out of bounds in mpssas_async\n",
3199 xpt_path_target_id(path)));
3200 target = &sassc->targets[xpt_path_target_id(path)];
3201 if (target->handle == 0)
3204 lunid = xpt_path_lun_id(path);
/* Look for an existing per-LUN record; create one if absent. */
3206 SLIST_FOREACH(lun, &target->luns, lun_link) {
3207 if (lun->lun_id == lunid) {
3213 if (found_lun == 0) {
3214 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3217 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3218 "LUN for EEDP support.\n");
3221 lun->lun_id = lunid;
3222 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch cached long READ CAPACITY data through the transport layer. */
3225 bzero(&rcap_buf, sizeof(rcap_buf));
3226 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3227 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3228 cdai.ccb_h.flags = CAM_DIR_IN;
3229 cdai.buftype = CDAI_TYPE_RCAPLONG;
3230 cdai.flags = CDAI_FLAG_NONE;
3231 cdai.bufsiz = sizeof(rcap_buf);
3232 cdai.buf = (uint8_t *)&rcap_buf;
3233 xpt_action((union ccb *)&cdai);
3234 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3235 cam_release_devq(cdai.ccb_h.path,
3238 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3239 && (rcap_buf.prot & SRC16_PROT_EN)) {
3240 switch (rcap_buf.prot & SRC16_P_TYPE) {
3243 lun->eedp_formatted = TRUE;
3244 lun->eedp_block_size =
3245 scsi_4btoul(rcap_buf.length);
3249 lun->eedp_formatted = FALSE;
3250 lun->eedp_block_size = 0;
3254 lun->eedp_formatted = FALSE;
3255 lun->eedp_block_size = 0;
/*
 * NOTE(review): extraction dropped interleaved lines here (braces, the
 * `union ccb *ccb;`/`path_id_t path_id;` declarations, the NULL-check on the
 * allocated CCB, `xpt_free_ccb()` on path-creation failure, and the
 * freeze/ccb bookkeeping at the end) and each line carries a stray leading
 * number — compare with the full mps_sas.c.
 */
3265 * Set the INRESET flag for this target so that no I/O will be sent to
3266 * the target until the reset has completed. If an I/O request does
3267 * happen, the devq will be frozen. The CCB holds the path which is
3268 * used to release the devq. The devq is released and the CCB is freed
3269 * when the TM completes.
3272 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3273 struct mpssas_target *target, lun_id_t lun_id)
/* Allocate a CCB without sleeping; we may be called from completion context. */
3278 ccb = xpt_alloc_ccb_nowait();
3280 path_id = cam_sim_path(sc->sassc->sim);
3281 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3282 target->tid, lun_id) != CAM_REQ_CMP) {
3286 tm->cm_targ = target;
3287 target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Kick off controller discovery: raise wait_for_port_enable (which keeps the
 * simq frozen until discovery finishes) and send the PORT_ENABLE request.
 * NOTE(review): extraction dropped surrounding lines (return type, braces,
 * locking) and each line carries a stray leading number.
 */
3293 mpssas_startup(struct mps_softc *sc)
3297 * Send the port enable message and set the wait_for_port_enable flag.
3298 * This flag helps to keep the simq frozen until all discovery events
3301 sc->wait_for_port_enable = 1;
3302 mpssas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT_ENABLE request.  Completion is handled by
 * mpssas_portenable_complete().  Returns an error when no command can be
 * allocated (the `return` lines fell in an extraction gap).
 *
 * NOTE(review): extraction dropped interleaved lines here (braces, the
 * return statements, `cm->cm_data = NULL;`) and each line carries a stray
 * leading number — compare with the full mps_sas.c.
 */
3307 mpssas_send_portenable(struct mps_softc *sc)
3309 MPI2_PORT_ENABLE_REQUEST *request;
3310 struct mps_command *cm;
3314 if ((cm = mps_alloc_command(sc)) == NULL)
3316 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3317 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3318 request->MsgFlags = 0;
3320 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3321 cm->cm_complete = mpssas_portenable_complete;
3325 mps_map_command(sc, cm);
3326 mps_dprint(sc, MPS_XINFO,
3327 "mps_send_portenable finished cm %p req %p complete %p\n",
3328 cm, cm->cm_req, cm->cm_complete);
3333 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3335 MPI2_PORT_ENABLE_REPLY *reply;
3336 struct mpssas_softc *sassc;
3342 * Currently there should be no way we can hit this case. It only
3343 * happens when we have a failure to allocate chain frames, and
3344 * port enable commands don't have S/G lists.
3346 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3347 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3348 "This should not happen!\n", __func__, cm->cm_flags);
3351 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3353 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3354 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3355 MPI2_IOCSTATUS_SUCCESS)
3356 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3358 mps_free_command(sc, cm);
3361 * Get WarpDrive info after discovery is complete but before the scan
3362 * starts. At this point, all devices are ready to be exposed to the
3363 * OS. If devices should be hidden instead, take them out of the
3364 * 'targets' array before the scan. The devinfo for a disk will have
3365 * some info and a volume's will be 0. Use that to remove disks.
3367 mps_wd_config_pages(sc);
3370 * Done waiting for port enable to complete. Decrement the refcount.
3371 * If refcount is 0, discovery is complete and a rescan of the bus can
3372 * take place. Since the simq was explicitly frozen before port
3373 * enable, it must be explicitly released here to keep the
3374 * freeze/release count in sync.
3376 sc->wait_for_port_enable = 0;
3377 sc->port_enable_complete = 1;
3378 wakeup(&sc->port_enable_complete);
3379 mpssas_startup_decrement(sassc);
/*
 * Return non-zero when target `id` appears in the comma-separated
 * sc->exclude_ids list (hint-supplied targets to hide from CAM).
 *
 * NOTE(review): strsep() writes NULs into sc->exclude_ids in place, so after
 * the first call the stored list is truncated at the first comma — verify
 * whether callers rely on this being repeatable.  Extraction also dropped
 * interleaved lines here (declarations of `ids`/`name`, `continue`, the
 * return statements) and each line carries a stray leading number.
 */
3383 mpssas_check_id(struct mpssas_softc *sassc, int id)
3385 struct mps_softc *sc = sassc->sc;
3389 ids = &sc->exclude_ids[0];
3390 while((name = strsep(&ids, ",")) != NULL) {
3391 if (name[0] == '\0')
3393 if (strtol(name, NULL, 0) == (long)id)
3401 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3403 struct mpssas_softc *sassc;
3404 struct mpssas_lun *lun, *lun_tmp;
3405 struct mpssas_target *targ;
3410 * The number of targets is based on IOC Facts, so free all of
3411 * the allocated LUNs for each target and then the target buffer
3414 for (i=0; i< maxtargets; i++) {
3415 targ = &sassc->targets[i];
3416 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3420 free(sassc->targets, M_MPT2);
3422 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3423 M_MPT2, M_WAITOK|M_ZERO);
3424 if (!sassc->targets) {
3425 panic("%s failed to alloc targets with error %d\n",