2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 /* Communications core for Avago Technologies (LSI) MPT2 */
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
63 #include <machine/stdarg.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
89 #define MPSSAS_DISCOVERY_TIMEOUT 20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
93 * static array to check SCSI OpCode for EEDP protection bits
95 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP protection lookup table indexed by SCSI opcode (0x00-0xFF).
 * Nonzero entries mark opcodes that carry protection information:
 * PRO_R (check/remove on read), PRO_W (insert on write), PRO_V (insert,
 * used for verify-class opcodes).  The populated rows correspond to the
 * 6/10/12/16-byte READ, WRITE, VERIFY and WRITE-AND-VERIFY opcodes.
 * NOTE(review): the table's closing brace lies outside this view.
 */
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Driver-private malloc type plus forward declarations for the static
 * helpers defined later in this file (CAM action/poll entry points,
 * task-management senders, and per-command completion callbacks). */
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124 struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128 struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
133 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
135 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static void mpssas_async(void *callback_arg, uint32_t code,
138 struct cam_path *path, void *arg);
139 static int mpssas_send_portenable(struct mps_softc *sc);
140 static void mpssas_portenable_complete(struct mps_softc *sc,
141 struct mps_command *cm);
/*
 * Linear scan of sassc->targets[] starting at index 'start' for the entry
 * whose firmware device handle matches 'handle'.
 * NOTE(review): the loop index declaration and the found/not-found return
 * paths are outside this view — presumably returns the match or NULL.
 */
143 struct mpssas_target *
144 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
146 struct mpssas_target *target;
149 for (i = start; i < sassc->maxtargets; i++) {
150 target = &sassc->targets[i];
151 if (target->handle == handle)
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery. Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
/*
 * Take a reference on the discovery/startup refcount.  Only meaningful
 * while MPSSAS_IN_STARTUP is set; the 0 -> 1 transition freezes the SIM
 * queue so CAM sends no commands until discovery settles (see the block
 * comment above on why a refcount is needed rather than a single event).
 */
166 mpssas_startup_increment(struct mpssas_softc *sassc)
168 MPS_FUNCTRACE(sassc->sc);
170 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
171 if (sassc->startup_refcount++ == 0) {
172 /* just starting, freeze the simq */
173 mps_dprint(sassc->sc, MPS_INIT,
174 "%s freezing simq\n", __func__);
176 xpt_freeze_simq(sassc->sim, 1);
178 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
179 sassc->startup_refcount);
/*
 * If the SIM queue was frozen with MPSSAS_QUEUE_FROZEN (e.g. during a
 * reinit), clear the flag and release one freeze count so CAM resumes
 * dispatching I/O.  No-op when the flag is not set.
 */
184 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
186 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
187 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
188 xpt_release_simq(sassc->sim, 1);
189 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a reference on the discovery/startup refcount.  When the last
 * reference goes away, startup is considered complete: clear
 * MPSSAS_IN_STARTUP and release the SIM queue frozen by
 * mpssas_startup_increment().
 */
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
196 MPS_FUNCTRACE(sassc->sc);
198 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 if (--sassc->startup_refcount == 0) {
200 /* finished all discovery-related actions, release
201 * the simq and rescan for the latest topology.
203 mps_dprint(sassc->sc, MPS_INIT,
204 "%s releasing simq\n", __func__);
205 sassc->flags &= ~MPSSAS_IN_STARTUP;
206 xpt_release_simq(sassc->sim, 1);
209 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
210 sassc->startup_refcount);
215 * The firmware requires us to stop sending commands when we're doing task
217 * XXX The logic for serializing the device has been made lazy and moved to
218 * mpssas_prepare_for_tm().
/*
 * Allocate a high-priority command for use as a task-management request
 * and pre-set its MPI function code.
 * NOTE(review): the NULL-check on the allocation and the return of 'tm'
 * are outside this view — callers treat a NULL result as alloc failure.
 */
221 mpssas_alloc_tm(struct mps_softc *sc)
223 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
224 struct mps_command *tm;
226 tm = mps_alloc_high_priority_command(sc);
230 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
231 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a task-management command allocated by mpssas_alloc_tm().
 * Clears the target's INRESET flag, unfreezes the per-device queue that
 * was frozen for the TM, frees the path/CCB used for the freeze, and
 * returns the command to the high-priority pool.
 */
236 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
238 int target_id = 0xFFFFFFFF;
244 * For TM's the devq is frozen for the device. Unfreeze it here and
245 * free the resources used for freezing the devq. Must clear the
246 * INRESET flag as well or scsi I/O will not work.
248 if (tm->cm_targ != NULL) {
249 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
250 target_id = tm->cm_targ->tid;
253 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
255 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
256 xpt_free_path(tm->cm_ccb->ccb_h.path);
257 xpt_free_ccb(tm->cm_ccb);
260 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ'
 * selects the wildcard case.  The target ID is derived from the target's
 * index in sassc->targets[].  Allocates a CCB non-blocking; on CCB or
 * path allocation failure the rescan is abandoned with an error printf.
 * NOTE(review): the xpt_rescan()/cleanup tail of this function is outside
 * this view.
 */
264 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
266 struct mpssas_softc *sassc = sc->sassc;
268 target_id_t targetid;
272 pathid = cam_sim_path(sassc->sim);
274 targetid = CAM_TARGET_WILDCARD;
276 targetid = targ - sassc->targets;
279 * Allocate a CCB and schedule a rescan.
281 ccb = xpt_alloc_ccb_nowait();
283 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
287 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
288 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
289 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
294 if (targetid == CAM_TARGET_WILDCARD)
295 ccb->ccb_h.func_code = XPT_SCAN_BUS;
297 ccb->ccb_h.func_code = XPT_SCAN_TGT;
299 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command: prefixes the message with
 * the CAM path (and the SCSI CDB/length for XPT_SCSI_IO), or a
 * "(noperiph:...)" tag when the command has no CCB, followed by the SMID.
 * Returns early when the requested debug level is not enabled.
 * Built with sbuf on a stack buffer to produce one atomic line.
 */
304 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
314 /* No need to be in here if debugging isn't enabled */
315 if ((cm->cm_sc->mps_debug & level) == 0)
318 sbuf_new(&sb, str, sizeof(str), 0);
322 if (cm->cm_ccb != NULL) {
323 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
325 sbuf_cat(&sb, path_str);
326 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
327 scsi_command_string(&cm->cm_ccb->csio, &sb);
328 sbuf_printf(&sb, "length %d ",
329 cm->cm_ccb->csio.dxfer_len);
333 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
334 cam_sim_name(cm->cm_sc->sassc->sim),
335 cam_sim_unit(cm->cm_sc->sassc->sim),
336 cam_sim_bus(cm->cm_sc->sassc->sim),
337 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
341 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
342 sbuf_vprintf(&sb, fmt, ap);
344 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion callback for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  The volume's device handle was stashed
 * in cm_complete_data.  Handles a NULL reply (likely diag reset) by just
 * freeing the TM; logs non-SUCCESS IOCStatus; on success clears the
 * target's handle-related state so the slot can be reused, while (per the
 * comment below) leaving devname/sasaddr intact.  Unlike bare drives,
 * volumes need no follow-up SAS_OP_REMOVE_DEVICE (see comment below).
 */
351 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
353 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
354 struct mpssas_target *targ;
359 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
360 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
364 /* XXX retry the remove after the diag reset completes? */
365 mps_dprint(sc, MPS_FAULT,
366 "%s NULL reply resetting device 0x%04x\n", __func__,
368 mpssas_free_tm(sc, tm);
372 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373 MPI2_IOCSTATUS_SUCCESS) {
374 mps_dprint(sc, MPS_ERROR,
375 "IOCStatus = 0x%x while resetting device 0x%x\n",
376 le16toh(reply->IOCStatus), handle);
379 mps_dprint(sc, MPS_XINFO,
380 "Reset aborted %u commands\n", reply->TerminationCount);
381 mps_free_reply(sc, tm->cm_reply_data);
382 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
384 mps_dprint(sc, MPS_XINFO,
385 "clearing target %u handle 0x%04x\n", targ->tid, handle);
388 * Don't clear target if remove fails because things will get confusing.
389 * Leave the devname and sasaddr intact so that we know to avoid reusing
390 * this target id if possible, and so we can assign the same target id
391 * to this device if it comes back in the future.
393 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
394 MPI2_IOCSTATUS_SUCCESS) {
397 targ->encl_handle = 0x0;
398 targ->encl_slot = 0x0;
399 targ->exp_dev_handle = 0x0;
401 targ->linkrate = 0x0;
406 mpssas_free_tm(sc, tm);
411 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
412 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware handle: mark the
 * target INREMOVAL, allocate a TM, kick off a CAM rescan, and send a
 * target reset whose completion (mpssas_remove_volume) finishes the
 * teardown.  On WD controllers configured to always expose the disk,
 * returns without doing anything.
 * NOTE(review): the early-return bodies for the WD/invalid-handle/alloc-
 * failure branches are outside this view.
 */
415 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
417 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418 struct mps_softc *sc;
419 struct mps_command *tm;
420 struct mpssas_target *targ = NULL;
422 MPS_FUNCTRACE(sassc->sc);
427 * If this is a WD controller, determine if the disk should be exposed
428 * to the OS or not. If disk should be exposed, return from this
429 * function without doing anything.
431 if (sc->WD_available && (sc->WD_hide_expose ==
432 MPS_WD_EXPOSE_ALWAYS)) {
437 targ = mpssas_find_target_by_handle(sassc, 0, handle);
439 /* FIXME: what is the action? */
440 /* We don't know about this device? */
441 mps_dprint(sc, MPS_ERROR,
442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 targ->flags |= MPSSAS_TARGET_INREMOVAL;
448 tm = mpssas_alloc_tm(sc);
450 mps_dprint(sc, MPS_ERROR,
451 "%s: command alloc failure\n", __func__);
455 mpssas_rescan_target(sc, targ);
457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
458 req->DevHandle = targ->handle;
459 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
461 /* SAS Hard Link Reset / SATA Link Reset */
462 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
466 tm->cm_complete = mpssas_remove_volume;
467 tm->cm_complete_data = (void *)(uintptr_t)handle;
469 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
470 __func__, targ->tid);
471 mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
473 mps_map_command(sc, tm);
477 * The MPT2 firmware performs debounce on the link to avoid transient link
478 * errors and false removals. When it does decide that link has been lost
479 * and a device need to go away, it expects that the host will perform a
480 * target reset and then an op remove. The reset has the side-effect of
481 * aborting any outstanding requests for the device, which is required for
482 * the op-remove to succeed. It's not clear if the host should check for
483 * the device coming back alive after the reset.
/*
 * Begin removal of a bare drive identified by firmware handle.  Per the
 * firmware contract described above: mark the target INREMOVAL, rescan,
 * then send a target reset (with link-reset MsgFlags) whose completion
 * (mpssas_remove_device) issues the follow-up SAS_OP_REMOVE_DEVICE.
 * Note DevHandle is byte-swapped here (htole16), unlike the volume path.
 * NOTE(review): the early-return bodies for the invalid-handle and
 * alloc-failure branches are outside this view.
 */
486 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
488 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489 struct mps_softc *sc;
490 struct mps_command *cm;
491 struct mpssas_target *targ = NULL;
493 MPS_FUNCTRACE(sassc->sc);
497 targ = mpssas_find_target_by_handle(sassc, 0, handle);
499 /* FIXME: what is the action? */
500 /* We don't know about this device? */
501 mps_dprint(sc, MPS_ERROR,
502 "%s : invalid handle 0x%x \n", __func__, handle);
506 targ->flags |= MPSSAS_TARGET_INREMOVAL;
508 cm = mpssas_alloc_tm(sc);
510 mps_dprint(sc, MPS_ERROR,
511 "%s: command alloc failure\n", __func__);
515 mpssas_rescan_target(sc, targ);
517 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
518 memset(req, 0, sizeof(*req));
519 req->DevHandle = htole16(targ->handle);
520 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
522 /* SAS Hard Link Reset / SATA Link Reset */
523 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
527 cm->cm_complete = mpssas_remove_device;
528 cm->cm_complete_data = (void *)(uintptr_t)handle;
530 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
531 __func__, targ->tid);
532 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
534 mps_map_command(sc, cm);
/*
 * Completion callback for the target-reset TM sent by
 * mpssas_prepare_remove().  Validates cm_flags and the reply, frees the
 * reply buffer, then REUSES the same command as a SAS IO Unit Control
 * REMOVE_DEVICE request for the handle stored in cm_complete_data.  The
 * remove is mapped immediately only if the target has no commands still
 * outstanding; otherwise it is parked in targ->pending_remove_tm to be
 * kicked off once the reset-aborted commands drain.
 */
538 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
540 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
541 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
542 struct mpssas_target *targ;
547 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
548 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
552 * Currently there should be no way we can hit this case. It only
553 * happens when we have a failure to allocate chain frames, and
554 * task management commands don't have S/G lists.
556 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
557 mps_dprint(sc, MPS_ERROR,
558 "%s: cm_flags = %#x for remove of handle %#04x! "
559 "This should not happen!\n", __func__, tm->cm_flags,
564 /* XXX retry the remove after the diag reset completes? */
565 mps_dprint(sc, MPS_FAULT,
566 "%s NULL reply resetting device 0x%04x\n", __func__,
568 mpssas_free_tm(sc, tm);
572 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
573 MPI2_IOCSTATUS_SUCCESS) {
574 mps_dprint(sc, MPS_ERROR,
575 "IOCStatus = 0x%x while resetting device 0x%x\n",
576 le16toh(reply->IOCStatus), handle);
579 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
580 le32toh(reply->TerminationCount));
581 mps_free_reply(sc, tm->cm_reply_data);
582 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
584 /* Reuse the existing command */
585 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
586 memset(req, 0, sizeof(*req));
587 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
588 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
589 req->DevHandle = htole16(handle);
591 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
592 tm->cm_complete = mpssas_remove_complete;
593 tm->cm_complete_data = (void *)(uintptr_t)handle;
596 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
597 * They should be aborted or time out and we'll kick thus off there
600 if (TAILQ_FIRST(&targ->commands) == NULL) {
601 mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
602 mps_map_command(sc, tm);
603 targ->pending_remove_tm = NULL;
605 targ->pending_remove_tm = tm;
609 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/*
 * Completion callback for the SAS_OP_REMOVE_DEVICE issued by
 * mpssas_remove_device().  At this point the target must have no pending
 * commands (KASSERT).  Handles error cm_flags and a NULL reply (likely a
 * chip reset) by freeing the TM; on IOC success clears the target's
 * handle-related state (leaving devname/sasaddr per the comment below)
 * and frees any per-LUN bookkeeping still chained on targ->luns.
 */
614 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
616 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
618 struct mpssas_target *targ;
619 struct mpssas_lun *lun;
623 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
624 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * At this point, we should have no pending commands for the target.
629 * The remove target has just completed.
631 KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
632 ("%s: no commands should be pending\n", __func__));
636 * Currently there should be no way we can hit this case. It only
637 * happens when we have a failure to allocate chain frames, and
638 * task management commands don't have S/G lists.
640 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
641 mps_dprint(sc, MPS_XINFO,
642 "%s: cm_flags = %#x for remove of handle %#04x! "
643 "This should not happen!\n", __func__, tm->cm_flags,
645 mpssas_free_tm(sc, tm);
650 /* most likely a chip reset */
651 mps_dprint(sc, MPS_FAULT,
652 "%s NULL reply removing device 0x%04x\n", __func__, handle);
653 mpssas_free_tm(sc, tm);
657 mps_dprint(sc, MPS_XINFO,
658 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
659 handle, le16toh(reply->IOCStatus));
662 * Don't clear target if remove fails because things will get confusing.
663 * Leave the devname and sasaddr intact so that we know to avoid reusing
664 * this target id if possible, and so we can assign the same target id
665 * to this device if it comes back in the future.
667 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
668 MPI2_IOCSTATUS_SUCCESS) {
670 targ->encl_handle = 0x0;
671 targ->encl_slot = 0x0;
672 targ->exp_dev_handle = 0x0;
674 targ->linkrate = 0x0;
678 while(!SLIST_EMPTY(&targ->luns)) {
679 lun = SLIST_FIRST(&targ->luns);
680 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
686 mpssas_free_tm(sc, tm);
/*
 * Build the MPI2 event-notification mask of all firmware events this
 * driver consumes (device status changes, discovery, topology changes,
 * IR/RAID events, log entries) and register mpssas_evt_handler for them.
 */
690 mpssas_register_events(struct mps_softc *sc)
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the per-adapter mpssas_softc and the
 * targets[] array (sized once from firmware MaxTargets+MaxVolumes — see
 * XXX below), create the SIMQ/SIM (request count reserves the high-
 * priority and one internal command), set up the firmware-event
 * taskqueue, register a single CAM bus, enter startup mode (freezing the
 * simq via mpssas_startup_increment), and register AC_ADVINFO_CHANGED
 * async callbacks used for EEDP detection — EEDP failure is non-fatal.
 * NOTE(review): the error-unwind labels and final return are outside
 * this view.
 */
715 mps_attach_sas(struct mps_softc *sc)
717 struct mpssas_softc *sassc;
719 int unit, error = 0, reqs;
722 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
724 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
727 * XXX MaxTargets could change during a reinit. Since we don't
728 * resize the targets[] array during such an event, cache the value
729 * of MaxTargets here so that we don't get into trouble later. This
730 * should move into the reinit logic.
732 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
733 sassc->targets = malloc(sizeof(struct mpssas_target) *
734 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
738 reqs = sc->num_reqs - sc->num_prireqs - 1;
739 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
740 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
745 unit = device_get_unit(sc->mps_dev);
746 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
747 unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
748 if (sassc->sim == NULL) {
749 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
754 TAILQ_INIT(&sassc->ev_queue);
756 /* Initialize taskqueue for Event Handling */
757 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
758 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
759 taskqueue_thread_enqueue, &sassc->ev_tq);
760 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
761 device_get_nameunit(sc->mps_dev));
766 * XXX There should be a bus for every port on the adapter, but since
767 * we're just going to fake the topology for now, we'll pretend that
768 * everything is just a target on a single bus.
770 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
771 mps_dprint(sc, MPS_INIT|MPS_ERROR,
772 "Error %d registering SCSI bus\n", error);
778 * Assume that discovery events will start right away.
780 * Hold off boot until discovery is complete.
782 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
783 sc->sassc->startup_refcount = 0;
784 mpssas_startup_increment(sassc);
786 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
789 * Register for async events so we can determine the EEDP
790 * capabilities of devices.
792 status = xpt_create_path(&sassc->path, /*periph*/NULL,
793 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
795 if (status != CAM_REQ_CMP) {
796 mps_dprint(sc, MPS_ERROR|MPS_INIT,
797 "Error %#x creating sim path\n", status);
802 event = AC_ADVINFO_CHANGED;
803 status = xpt_register_async(event, mpssas_async, sc,
805 if (status != CAM_REQ_CMP) {
806 mps_dprint(sc, MPS_ERROR,
807 "Error %#x registering async handler for "
808 "AC_ADVINFO_CHANGED events\n", status);
809 xpt_free_path(sassc->path);
813 if (status != CAM_REQ_CMP) {
815 * EEDP use is the exception, not the rule.
816 * Warn the user, but do not fail to attach.
818 mps_printf(sc, "EEDP capabilities disabled.\n");
823 mpssas_register_events(sc);
828 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
/*
 * Tear down the SAS/CAM layer in reverse order of attach: deregister
 * firmware events, drain/free the event taskqueue (with the lock dropped,
 * per the comment below), burn off any remaining startup refcounts so CAM
 * isn't left frozen, remove the async handler and its path, release a
 * possibly-still-held startup simq freeze, deregister/free the SIM and
 * SIMQ, free per-target LUN lists, and finally free targets[].
 * No-op when attach never created sc->sassc.
 */
833 mps_detach_sas(struct mps_softc *sc)
835 struct mpssas_softc *sassc;
836 struct mpssas_lun *lun, *lun_tmp;
837 struct mpssas_target *targ;
842 if (sc->sassc == NULL)
846 mps_deregister_events(sc, sassc->mpssas_eh);
849 * Drain and free the event handling taskqueue with the lock
850 * unheld so that any parallel processing tasks drain properly
851 * without deadlocking.
853 if (sassc->ev_tq != NULL)
854 taskqueue_free(sassc->ev_tq);
856 /* Make sure CAM doesn't wedge if we had to bail out early. */
859 while (sassc->startup_refcount != 0)
860 mpssas_startup_decrement(sassc);
862 /* Deregister our async handler */
863 if (sassc->path != NULL) {
864 xpt_register_async(0, mpssas_async, sc, sassc->path);
865 xpt_free_path(sassc->path);
869 if (sassc->flags & MPSSAS_IN_STARTUP)
870 xpt_release_simq(sassc->sim, 1);
872 if (sassc->sim != NULL) {
873 xpt_bus_deregister(cam_sim_path(sassc->sim));
874 cam_sim_free(sassc->sim, FALSE);
879 if (sassc->devq != NULL)
880 cam_simq_free(sassc->devq);
882 for(i=0; i< sassc->maxtargets ;i++) {
883 targ = &sassc->targets[i];
884 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
888 free(sassc->targets, M_MPT2);
/*
 * Called when firmware discovery completes: cancel any pending discovery
 * timeout callout, and (once, when mapping-event tracking is enabled)
 * arm the device_check_callout so missing-device counts in the mapping
 * table get updated after events have had time to arrive (rationale in
 * the comment below).
 */
896 mpssas_discovery_end(struct mpssas_softc *sassc)
898 struct mps_softc *sc = sassc->sc;
902 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
903 callout_stop(&sassc->discovery_callout);
906 * After discovery has completed, check the mapping table for any
907 * missing devices and update their missing counts. Only do this once
908 * whenever the driver is initialized so that missing counts aren't
909 * updated unnecessarily. Note that just because discovery has
910 * completed doesn't mean that events have been processed yet. The
911 * check_devices function is a callout timer that checks if ALL devices
912 * are missing. If so, it will wait a little longer for events to
913 * complete and keep resetting itself until some device in the mapping
914 * table is not missing, meaning that event processing has started.
916 if (sc->track_mapping_events) {
917 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
918 "completed. Check for missing devices in the mapping "
920 callout_reset(&sc->device_check_callout,
921 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
/*
 * Main CAM action entry point for the SIM.  Dispatches on func_code:
 * XPT_PATH_INQ fills adapter capabilities (initiator_id is deliberately
 * set past the valid target range, maxio from the softc); GET_TRAN_
 * SETTINGS reports per-target SAS link rate and tagged queuing;
 * CALC_GEOMETRY uses the extended translation; RESET_DEV is handled for
 * real while abort/bus-reset are faked as success; SCSI I/O and SMP I/O
 * are handed to their dedicated handlers; anything else gets
 * CAM_FUNC_NOTAVAIL.  Must be called with the softc mutex held.
 * NOTE(review): several case labels/breaks fall outside this view.
 */
927 mpssas_action(struct cam_sim *sim, union ccb *ccb)
929 struct mpssas_softc *sassc;
931 sassc = cam_sim_softc(sim);
933 MPS_FUNCTRACE(sassc->sc);
934 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
935 ccb->ccb_h.func_code);
936 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
938 switch (ccb->ccb_h.func_code) {
941 struct ccb_pathinq *cpi = &ccb->cpi;
942 struct mps_softc *sc = sassc->sc;
944 cpi->version_num = 1;
945 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
946 cpi->target_sprt = 0;
947 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
948 cpi->hba_eng_cnt = 0;
949 cpi->max_target = sassc->maxtargets - 1;
953 * initiator_id is set here to an ID outside the set of valid
954 * target IDs (including volumes).
956 cpi->initiator_id = sassc->maxtargets;
957 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
958 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
959 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
960 cpi->unit_number = cam_sim_unit(sim);
961 cpi->bus_id = cam_sim_bus(sim);
962 cpi->base_transfer_speed = 150000;
963 cpi->transport = XPORT_SAS;
964 cpi->transport_version = 0;
965 cpi->protocol = PROTO_SCSI;
966 cpi->protocol_version = SCSI_REV_SPC;
967 cpi->maxio = sc->maxio;
968 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
971 case XPT_GET_TRAN_SETTINGS:
973 struct ccb_trans_settings *cts;
974 struct ccb_trans_settings_sas *sas;
975 struct ccb_trans_settings_scsi *scsi;
976 struct mpssas_target *targ;
979 sas = &cts->xport_specific.sas;
980 scsi = &cts->proto_specific.scsi;
982 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
983 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
984 cts->ccb_h.target_id));
985 targ = &sassc->targets[cts->ccb_h.target_id];
986 if (targ->handle == 0x0) {
987 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
991 cts->protocol_version = SCSI_REV_SPC2;
992 cts->transport = XPORT_SAS;
993 cts->transport_version = 0;
995 sas->valid = CTS_SAS_VALID_SPEED;
996 switch (targ->linkrate) {
998 sas->bitrate = 150000;
1001 sas->bitrate = 300000;
1004 sas->bitrate = 600000;
1010 cts->protocol = PROTO_SCSI;
1011 scsi->valid = CTS_SCSI_VALID_TQ;
1012 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1014 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1017 case XPT_CALC_GEOMETRY:
1018 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1019 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1022 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1023 mpssas_action_resetdev(sassc, ccb);
1028 mps_dprint(sassc->sc, MPS_XINFO,
1029 "mpssas_action faking success for abort or reset\n");
1030 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1033 mpssas_action_scsiio(sassc, ccb);
1036 mpssas_action_smpio(sassc, ccb);
1039 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset (e.g. AC_BUS_RESET or AC_SENT_BDR) on the given
 * target/LUN: build a path on this SIM, fire the async event, free the
 * path.  On path-creation failure, logs and skips the announcement.
 */
1047 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1048 target_id_t target_id, lun_id_t lun_id)
1050 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1051 struct cam_path *path;
1053 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1054 ac_code, target_id, (uintmax_t)lun_id);
1056 if (xpt_create_path(&path, NULL,
1057 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1058 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1063 xpt_async(ac_code, path, NULL);
1064 xpt_free_path(path);
/*
 * Diag-reset helper: walk every non-free command (SMID 0 is reserved, so
 * start at 1) and complete it with a NULL reply.  Frees SATA-ID buffers
 * that timed out, marks polled commands complete, invokes cm_complete or
 * wakes sleepers as appropriate, logs any command that could not be
 * completed, and zeroes the active-I/O count.  Caller must hold the
 * softc mutex.
 */
1068 mpssas_complete_all_commands(struct mps_softc *sc)
1070 struct mps_command *cm;
1075 mtx_assert(&sc->mps_mtx, MA_OWNED);
1077 /* complete all commands with a NULL reply */
1078 for (i = 1; i < sc->num_reqs; i++) {
1079 cm = &sc->commands[i];
1080 if (cm->cm_state == MPS_CM_STATE_FREE)
1083 cm->cm_state = MPS_CM_STATE_BUSY;
1084 cm->cm_reply = NULL;
1087 if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
1089 free(cm->cm_data, M_MPT2);
1093 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1094 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1096 if (cm->cm_complete != NULL) {
1097 mpssas_log_command(cm, MPS_RECOVERY,
1098 "completing cm %p state %x ccb %p for diag reset\n",
1099 cm, cm->cm_state, cm->cm_ccb);
1101 cm->cm_complete(sc, cm);
1103 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1104 mpssas_log_command(cm, MPS_RECOVERY,
1105 "waking up cm %p state %x ccb %p for diag reset\n",
1106 cm, cm->cm_state, cm->cm_ccb);
1111 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1112 /* this should never happen, but if it does, log */
1113 mpssas_log_command(cm, MPS_RECOVERY,
1114 "cm %p state %x flags 0x%x ccb %p during diag "
1115 "reset\n", cm, cm->cm_state, cm->cm_flags,
1120 sc->io_cmds_active = 0;
/*
 * Post-diag-reset recovery: re-enter startup mode (freezing the simq via
 * mpssas_startup_increment so CAM stays quiet until rediscovery),
 * announce a bus reset to CAM, complete all outstanding commands with
 * NULL replies, then invalidate every cached target handle — firmware
 * handles may change across the reset — and flag each target
 * INDIAGRESET.  Port enable after the reset re-triggers discovery.
 */
1124 mpssas_handle_reinit(struct mps_softc *sc)
1128 /* Go back into startup mode and freeze the simq, so that CAM
1129 * doesn't send any commands until after we've rediscovered all
1130 * targets and found the proper device handles for them.
1132 * After the reset, portenable will trigger discovery, and after all
1133 * discovery-related activities have finished, the simq will be
1136 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1137 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1138 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1139 mpssas_startup_increment(sc->sassc);
1141 /* notify CAM of a bus reset */
1142 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1145 /* complete and cleanup after all outstanding commands */
1146 mpssas_complete_all_commands(sc);
1148 mps_dprint(sc, MPS_INIT,
1149 "%s startup %u after command completion\n", __func__,
1150 sc->sassc->startup_refcount);
1152 /* zero all the target handles, since they may change after the
1153 * reset, and we have to rediscover all the targets and use the new
1156 for (i = 0; i < sc->sassc->maxtargets; i++) {
1157 if (sc->sassc->targets[i].outstanding != 0)
1158 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1159 i, sc->sassc->targets[i].outstanding);
1160 sc->sassc->targets[i].handle = 0x0;
1161 sc->sassc->targets[i].exp_dev_handle = 0x0;
1162 sc->sassc->targets[i].outstanding = 0;
1163 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command never completed.  Asserts
 * the TM is still in-queue, moves it to BUSY so the hardware can no
 * longer complete it out from under us.
 * NOTE(review): the tail of this function (presumably a controller
 * reinit to recover) is outside this view.
 */
1168 mpssas_tm_timeout(void *data)
1170 struct mps_command *tm = data;
1171 struct mps_softc *sc = tm->cm_sc;
1173 mtx_assert(&sc->mps_mtx, MA_OWNED);
1175 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1176 "task mgmt %p timed out\n", tm);
1178 KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1179 ("command not inqueue\n"));
1181 tm->cm_state = MPS_CM_STATE_BUSY;
/*
 * Completion handler for a LOGICAL_UNIT_RESET TM issued by
 * mpssas_send_reset().  Stops the TM timeout callout, handles the
 * chain-frame-error and NULL-reply cases (the latter is benign when it was
 * caused by a diag reset, otherwise the controller is reset), then counts
 * the commands still outstanding on this LUN.  If none remain, recovery
 * for the LUN is finished: a BDR async event is announced and, if another
 * timed-out command exists on the target, the TM is reused to abort it;
 * otherwise the TM is freed.  If commands remain, the LUN reset is treated
 * as failed and escalated to a target reset.
 */
1186 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1188 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1189 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1190 unsigned int cm_count = 0;
1191 struct mps_command *cm;
1192 struct mpssas_target *targ;
/* The TM completed, so its timeout no longer applies. */
1194 callout_stop(&tm->cm_callout);
1196 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1197 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1201 * Currently there should be no way we can hit this case. It only
1202 * happens when we have a failure to allocate chain frames, and
1203 * task management commands don't have S/G lists.
1204 * XXXSL So should it be an assertion?
1206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1207 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1208 "%s: cm_flags = %#x for LUN reset! "
1209 "This should not happen!\n", __func__, tm->cm_flags);
1210 mpssas_free_tm(sc, tm);
1214 if (reply == NULL) {
1215 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1217 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1218 /* this completion was due to a reset, just cleanup */
1219 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1220 "reset, ignoring NULL LUN reset reply\n");
1222 mpssas_free_tm(sc, tm);
1225 /* we should have gotten a reply. */
1226 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1227 "LUN reset attempt, resetting controller\n");
/* Reply fields are little-endian per MPI2; byte-swap for logging. */
1233 mps_dprint(sc, MPS_RECOVERY,
1234 "logical unit reset status 0x%x code 0x%x count %u\n",
1235 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1236 le32toh(reply->TerminationCount));
1239 * See if there are any outstanding commands for this LUN.
1240 * This could be made more efficient by using a per-LU data
1241 * structure of some sort.
1243 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1244 if (cm->cm_lun == tm->cm_lun)
1248 if (cm_count == 0) {
1249 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1250 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a bus-device reset was delivered to this LUN. */
1253 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1256 * We've finished recovery for this logical unit. check and
1257 * see if some other logical unit has a timedout command
1258 * that needs to be processed.
1260 cm = TAILQ_FIRST(&targ->timedout_commands);
1262 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1263 "More commands to abort for target %u\n",
1265 mpssas_send_abort(sc, tm, cm);
1268 mpssas_free_tm(sc, tm);
1272 * If we still have commands for this LUN, the reset
1273 * effectively failed, regardless of the status reported.
1274 * Escalate to a target reset.
1276 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1277 "logical unit reset complete for target %u, but still "
1278 "have %u command(s), sending target reset\n", targ->tid,
1280 mpssas_send_reset(sc, tm,
1281 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET_RESET TM issued by mpssas_send_reset().
 * Stops the TM timeout callout and handles the chain-frame-error and
 * NULL-reply cases exactly like the LUN-reset handler.  If the target has
 * no commands outstanding afterward, recovery is complete: a BDR async
 * event is announced and the TM freed.  If commands remain, the target
 * reset effectively failed and recovery escalates to a controller reset.
 */
1286 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1288 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1289 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1290 struct mpssas_target *targ;
1292 callout_stop(&tm->cm_callout);
1294 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1295 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1299 * Currently there should be no way we can hit this case. It only
1300 * happens when we have a failure to allocate chain frames, and
1301 * task management commands don't have S/G lists.
1303 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1304 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1305 "This should not happen!\n", __func__, tm->cm_flags);
1306 mpssas_free_tm(sc, tm);
1310 if (reply == NULL) {
1311 mps_dprint(sc, MPS_RECOVERY,
/* Fixed format string: was "tm %pi TaskMID", the stray 'i' after %p
 * printed a literal 'i' glued to the pointer value. */
1312 "NULL target reset reply for tm %p TaskMID %u\n",
1313 tm, le16toh(req->TaskMID));
1314 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1315 /* this completion was due to a reset, just cleanup */
1316 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1317 "reset, ignoring NULL target reset reply\n");
1319 mpssas_free_tm(sc, tm);
1321 /* we should have gotten a reply. */
1322 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1323 "target reset attempt, resetting controller\n");
/* Reply fields are little-endian per MPI2; byte-swap for logging. */
1329 mps_dprint(sc, MPS_RECOVERY,
1330 "target reset status 0x%x code 0x%x count %u\n",
1331 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1332 le32toh(reply->TerminationCount));
1334 if (targ->outstanding == 0) {
1335 /* we've finished recovery for this target and all
1336 * of its logical units.
1338 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1339 "Finished reset recovery for target %u\n", targ->tid);
1341 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1345 mpssas_free_tm(sc, tm);
1348 * After a target reset, if this target still has
1349 * outstanding commands, the reset effectively failed,
1350 * regardless of the status reported. escalate.
1352 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1353 "Target reset complete for target %u, but still have %u "
1354 "command(s), resetting controller\n", targ->tid,
/* Seconds to wait for a reset TM before mpssas_tm_timeout() fires. */
1360 #define MPS_RESET_TIMEOUT 30
/*
 * Fill in and dispatch a SCSI task-management reset of the given 'type'
 * (logical-unit reset or target reset) using the TM frame 'tm'.  Picks
 * the matching completion handler, bumps the per-target reset counter,
 * freezes/pauses I/O via mpssas_prepare_for_tm(), arms the
 * MPS_RESET_TIMEOUT callout, and sends the request with
 * mps_map_command().  A target with no device handle cannot be reset.
 */
1363 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1365 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1366 struct mpssas_target *target;
1369 target = tm->cm_targ;
1370 if (target->handle == 0) {
1371 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1372 __func__, target->tid);
1376 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
/* Request fields are little-endian per MPI2. */
1377 req->DevHandle = htole16(target->handle);
1378 req->TaskType = type;
1380 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1381 /* XXX Need to handle invalid LUNs */
1382 MPS_SET_LUN(req->LUN, tm->cm_lun);
1383 tm->cm_targ->logical_unit_resets++;
1384 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1385 "Sending logical unit reset to target %u lun %d\n",
1386 target->tid, tm->cm_lun);
1387 tm->cm_complete = mpssas_logical_unit_reset_complete;
1388 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1389 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1391 * Target reset method =
1392 * SAS Hard Link Reset / SATA Link Reset
1394 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1395 tm->cm_targ->target_resets++;
1396 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1397 "Sending target reset to target %u\n", target->tid);
1398 tm->cm_complete = mpssas_target_reset_complete;
/* Target reset applies to every LUN on the target. */
1399 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1401 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1406 tm->cm_complete_data = (void *)tm;
/* Arm the TM watchdog before handing the frame to the hardware. */
1408 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1409 mpssas_tm_timeout, tm);
1411 err = mps_map_command(sc, tm);
1413 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1414 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK TM sent by mpssas_send_abort().
 * Stops the TM timeout callout, handles the chain-frame-error and
 * NULL-reply cases, then inspects the target's timedout_commands list:
 * empty means abort recovery for the target is finished (free the TM);
 * a different command at the head means this abort succeeded and the TM
 * is reused to abort the next one; the same command still at the head
 * means the abort failed, so recovery escalates to a logical-unit reset.
 */
1422 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1424 struct mps_command *cm;
1425 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1426 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1427 struct mpssas_target *targ;
1429 callout_stop(&tm->cm_callout);
1431 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1432 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1436 * Currently there should be no way we can hit this case. It only
1437 * happens when we have a failure to allocate chain frames, and
1438 * task management commands don't have S/G lists.
1440 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1441 mps_dprint(sc, MPS_RECOVERY,
1442 "cm_flags = %#x for abort %p TaskMID %u!\n",
1443 tm->cm_flags, tm, le16toh(req->TaskMID));
1444 mpssas_free_tm(sc, tm);
1448 if (reply == NULL) {
1449 mps_dprint(sc, MPS_RECOVERY,
1450 "NULL abort reply for tm %p TaskMID %u\n",
1451 tm, le16toh(req->TaskMID));
1452 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1453 /* this completion was due to a reset, just cleanup */
1454 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1455 "reset, ignoring NULL abort reply\n");
1457 mpssas_free_tm(sc, tm);
1459 /* we should have gotten a reply. */
1460 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1461 "abort attempt, resetting controller\n");
/* Reply fields are little-endian per MPI2; byte-swap for logging. */
1467 mps_dprint(sc, MPS_RECOVERY,
1468 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1469 le16toh(req->TaskMID),
1470 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1471 le32toh(reply->TerminationCount));
1473 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1476 * If there are no more timedout commands, we're done with
1477 * error recovery for this target.
1479 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1480 "Finished abort recovery for target %u\n", targ->tid);
1483 mpssas_free_tm(sc, tm);
1484 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1485 /* abort success, but we have more timedout commands to abort */
1486 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1487 "Continuing abort recovery for target %u\n", targ->tid);
1489 mpssas_send_abort(sc, tm, cm);
1491 /* we didn't get a command completion, so the abort
1492 * failed as far as we're concerned. escalate.
1494 mps_dprint(sc, MPS_RECOVERY,
1495 "Abort failed for target %u, sending logical unit reset\n",
1498 mpssas_send_reset(sc, tm,
1499 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT_TASK TM before mpssas_tm_timeout() fires. */
1503 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT_TASK TM for the timed-out command 'cm' using
 * the TM frame 'tm'.  The TaskMID identifies the victim command by its
 * SMID.  Sets mpssas_abort_complete() as the completion handler, copies
 * the target/LUN from the victim, arms the MPS_ABORT_TIMEOUT callout,
 * pauses I/O via mpssas_prepare_for_tm(), and sends the request with
 * mps_map_command().  A target with no device handle cannot be aborted.
 */
1506 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1508 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1509 struct mpssas_target *targ;
1513 if (targ->handle == 0) {
1514 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1515 "%s null devhandle for target_id %d\n",
1516 __func__, cm->cm_ccb->ccb_h.target_id);
1520 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1521 "Aborting command %p\n", cm);
1523 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
/* Request fields are little-endian per MPI2. */
1524 req->DevHandle = htole16(targ->handle);
1525 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1527 /* XXX Need to handle invalid LUNs */
1528 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* The firmware matches the abort to the victim command by SMID. */
1530 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1533 tm->cm_complete = mpssas_abort_complete;
1534 tm->cm_complete_data = (void *)tm;
1535 tm->cm_targ = cm->cm_targ;
1536 tm->cm_lun = cm->cm_lun;
1538 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1539 mpssas_tm_timeout, tm);
1543 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1545 err = mps_map_command(sc, tm);
1547 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1548 "error %d sending abort for cm %p SMID %u\n",
1549 err, cm, req->TaskMID);
/*
 * Per-command timeout handler for SCSI I/O, armed in
 * mpssas_action_scsiio().  Runs the interrupt handler once in case the
 * completion is merely pending; if the command really timed out, marks it
 * CAM_CMD_TIMEOUT, flags it for recovery, and queues it on the target's
 * timedout_commands list.  If the target already has a TM in flight the
 * command just waits its turn; otherwise a TM is allocated and recovery
 * starts by aborting this command.  If no TM can be allocated (out of
 * HighPriority credits) the condition is only logged.
 */
1554 mpssas_scsiio_timeout(void *data)
1556 sbintime_t elapsed, now;
1558 struct mps_softc *sc;
1559 struct mps_command *cm;
1560 struct mpssas_target *targ;
1562 cm = (struct mps_command *)data;
1568 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* Fixed: the last argument was 'sc', which printed the softc pointer
 * where the format string clearly reports the command ("cm %p"). */
1570 mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);
1573 * Run the interrupt handler to make sure it's not pending. This
1574 * isn't perfect because the command could have already completed
1575 * and been re-used, though this is unlikely.
1577 mps_intr_locked(sc);
1578 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1579 mpssas_log_command(cm, MPS_XINFO,
1580 "SCSI command %p almost timed out\n", cm);
1584 if (cm->cm_ccb == NULL) {
1585 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
/* qos.sim_data holds the sbinuptime() recorded at submission. */
1592 elapsed = now - ccb->ccb_h.qos.sim_data;
1593 mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1594 "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1595 targ->tid, targ->handle, ccb->ccb_h.timeout,
1596 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1598 /* XXX first, check the firmware state, to see if it's still
1599 * operational. if not, do a diag reset.
1601 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1602 cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1603 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1605 if (targ->tm != NULL) {
1606 /* target already in recovery, just queue up another
1607 * timedout command to be processed later.
1609 mps_dprint(sc, MPS_RECOVERY,
1610 "queued timedout cm %p for processing by tm %p\n",
1612 } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1613 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1614 "Sending abort to target %u for SMID %d\n", targ->tid,
1615 cm->cm_desc.Default.SMID);
1616 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1619 /* start recovery by aborting the first timedout command */
1620 mpssas_send_abort(sc, targ->tm, cm);
1622 /* XXX queue this target up for recovery once a TM becomes
1623 * available. The firmware only has a limited number of
1624 * HighPriority credits for the high priority requests used
1625 * for task management, and we ran out.
1627 * Isilon: don't worry about this for now, since we have
1628 * more credits than disks in an enclosure, and limit
1629 * ourselves to one TM per target for recovery.
1631 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1632 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler.  Validates the target (handle present,
 * not a RAID component, not mid-removal/reset/shutdown), allocates a
 * driver command, translates the CCB into an MPI2 SCSI_IO request
 * (direction, tag/queue action, LUN, CDB, optional EEDP protection
 * setup, optional WD direct-drive I/O), arms the per-command timeout,
 * accounts the command against the target, and hands it to the hardware
 * via mps_map_command().  Allocation failure or an in-progress diag
 * reset freezes the SIM queue and requeues the CCB.
 */
1638 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1640 MPI2_SCSI_IO_REQUEST *req;
1641 struct ccb_scsiio *csio;
1642 struct mps_softc *sc;
1643 struct mpssas_target *targ;
1644 struct mpssas_lun *lun;
1645 struct mps_command *cm;
1646 uint8_t i, lba_byte, *ref_tag_addr;
1647 uint16_t eedp_flags;
1648 uint32_t mpi_control;
1652 mtx_assert(&sc->mps_mtx, MA_OWNED);
1655 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1656 ("Target %d out of bounds in XPT_SCSI_IO\n",
1657 csio->ccb_h.target_id));
1658 targ = &sassc->targets[csio->ccb_h.target_id];
1659 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* A zero handle means the firmware has no device at this target. */
1660 if (targ->handle == 0x0) {
1661 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1662 __func__, csio->ccb_h.target_id);
1663 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1667 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1668 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1669 "supported %u\n", __func__, csio->ccb_h.target_id);
1670 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1675 * Sometimes, it is possible to get a command that is not "In
1676 * Progress" and was actually aborted by the upper layer. Check for
1677 * this here and complete the command without error.
1679 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1680 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1681 "target %u\n", __func__, csio->ccb_h.target_id);
1686 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1687 * that the volume has timed out. We want volumes to be enumerated
1688 * until they are deleted/removed, not just failed. In either event,
1689 * we're removing the target due to a firmware event telling us
1690 * the device is now gone (as opposed to some transient event). Since
1691 * we're opting to remove failed devices from the OS's view, we need
1692 * to propagate that status up the stack.
1694 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1695 if (targ->devinfo == 0)
1696 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1698 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1703 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1704 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1705 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1711 * If target has a reset in progress, freeze the devq and return. The
1712 * devq will be released when the TM reset is finished.
1714 if (targ->flags & MPSSAS_TARGET_INRESET) {
1715 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1716 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1717 __func__, targ->tid);
1718 xpt_freeze_devq(ccb->ccb_h.path, 1);
1723 cm = mps_alloc_command(sc);
/* Out of commands or mid diag-reset: freeze the SIM queue and requeue. */
1724 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1726 mps_free_command(sc, cm);
1728 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1729 xpt_freeze_simq(sassc->sim, 1);
1730 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1732 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1733 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1738 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1739 bzero(req, sizeof(*req));
/* MPI2 request fields are little-endian. */
1740 req->DevHandle = htole16(targ->handle);
1741 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1743 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1744 req->SenseBufferLength = MPS_SENSE_LEN;
1746 req->ChainOffset = 0;
1747 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1752 req->DataLength = htole32(csio->dxfer_len);
1753 req->BidirectionalDataLength = 0;
/* NOTE(review): IoFlags is assigned again after the CDB copy below --
 * this first assignment appears redundant; confirm before removing. */
1754 req->IoFlags = htole16(csio->cdb_len);
1757 /* Note: BiDirectional transfers are not supported */
1758 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1760 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1761 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1764 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1765 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1769 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs carry 4 extra dwords; advertise that in Control. */
1773 if (csio->cdb_len == 32)
1774 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1776 * It looks like the hardware doesn't require an explicit tag
1777 * number for each transaction. SAM Task Management not supported
1780 switch (csio->tag_action) {
1781 case MSG_HEAD_OF_Q_TAG:
1782 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1784 case MSG_ORDERED_Q_TAG:
1785 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1788 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1790 case CAM_TAG_ACTION_NONE:
1791 case MSG_SIMPLE_Q_TAG:
1793 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in the per-target Transport Layer Retries setting. */
1796 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1797 req->Control = htole32(mpi_control);
1798 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1799 mps_free_command(sc, cm);
1800 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* The CDB may be passed by pointer or inline in the CCB. */
1805 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1806 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1808 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1809 req->IoFlags = htole16(csio->cdb_len);
1812 * Check if EEDP is supported and enabled. If it is then check if the
1813 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1814 * is formatted for EEDP support. If all of this is true, set CDB up
1815 * for EEDP transfer.
1817 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1818 if (sc->eedp_enabled && eedp_flags) {
1819 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1820 if (lun->lun_id == csio->ccb_h.target_lun) {
1825 if ((lun != NULL) && (lun->eedp_formatted)) {
1826 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1827 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1828 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1829 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1830 req->EEDPFlags = htole16(eedp_flags);
1833 * If CDB less than 32, fill in Primary Ref Tag with
1834 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1835 * already there. Also, set protection bit. FreeBSD
1836 * currently does not support CDBs bigger than 16, but
1837 * the code doesn't hurt, and will be here for the
1840 if (csio->cdb_len != 32) {
/* LBA starts at byte 2 (10/12-byte CDBs) or byte 6 (16-byte). */
1841 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1842 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1843 PrimaryReferenceTag;
1844 for (i = 0; i < 4; i++) {
1846 req->CDB.CDB32[lba_byte + i];
1849 req->CDB.EEDP32.PrimaryReferenceTag =
1850 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1851 req->CDB.EEDP32.PrimaryApplicationTagMask =
1853 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1857 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1858 req->EEDPFlags = htole16(eedp_flags);
1859 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1865 cm->cm_length = csio->dxfer_len;
1866 if (cm->cm_length != 0) {
1868 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1872 cm->cm_sge = &req->SGL;
/* SGL occupies the request frame from dword 24 to dword 32. */
1873 cm->cm_sglsize = (32 - 24) * 4;
1874 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1875 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1876 cm->cm_complete = mpssas_scsiio_complete;
1877 cm->cm_complete_data = ccb;
1879 cm->cm_lun = csio->ccb_h.target_lun;
1883 * If HBA is a WD and the command is not for a retry, try to build a
1884 * direct I/O message. If failed, or the command is for a retry, send
1885 * the I/O to the IR volume itself.
1887 if (sc->WD_valid_config) {
1888 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1889 mpssas_direct_drive_io(sassc, cm, ccb);
1891 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1895 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1896 if (csio->bio != NULL)
1897 biotrack(csio->bio, __func__);
/* Record submission time for the timeout handler's elapsed report. */
1899 csio->ccb_h.qos.sim_data = sbinuptime();
1900 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1901 mpssas_scsiio_timeout, cm, 0);
1904 targ->outstanding++;
1905 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1906 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1908 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1909 __func__, cm, ccb, targ->outstanding);
1911 mps_map_command(sc, cm);
1916 * mps_sc_failed_io_info - log details of a non-successful SCSI_IO request
/*
 * Decode and print (at MPS_XINFO level) the failure details of a SCSI_IO
 * reply: IOC status and SCSI status via the driver's description tables,
 * the SCSI state as a bit string, the autosense data when valid, and the
 * SAS response code when response info is present.  The well-known
 * 0x31170000 loginfo value is special-cased (visible below).
 */
1919 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1920 Mpi2SCSIIOReply_t *mpi_reply)
/* Reply fields are little-endian per MPI2. */
1924 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1925 MPI2_IOCSTATUS_MASK;
1926 u8 scsi_state = mpi_reply->SCSIState;
1927 u8 scsi_status = mpi_reply->SCSIStatus;
1928 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1929 const char *desc_ioc_state, *desc_scsi_status;
1931 if (log_info == 0x31170000)
1934 desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1936 desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1939 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1940 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1943 *We can add more detail about underflow data here
/* %b renders scsi_state as named bits using the "\20..." format string. */
1946 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1947 "scsi_state %b\n", desc_scsi_status, scsi_status,
1948 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1949 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1951 if (sc->mps_debug & MPS_XINFO &&
1952 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1953 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1954 scsi_sense_print(csio);
1955 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1958 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1959 response_info = le32toh(mpi_reply->ResponseInfo);
1960 response_bytes = (u8 *)&response_info;
1961 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1963 mps_describe_table(mps_scsi_taskmgmt_string,
1964 response_bytes[0]));
1969 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1971 MPI2_SCSI_IO_REPLY *rep;
1973 struct ccb_scsiio *csio;
1974 struct mpssas_softc *sassc;
1975 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1976 u8 *TLR_bits, TLR_on;
1979 struct mpssas_target *target;
1980 target_id_t target_id;
1983 mps_dprint(sc, MPS_TRACE,
1984 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
1985 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1986 cm->cm_targ->outstanding);
1988 callout_stop(&cm->cm_callout);
1989 mtx_assert(&sc->mps_mtx, MA_OWNED);
1992 ccb = cm->cm_complete_data;
1994 target_id = csio->ccb_h.target_id;
1995 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1997 * XXX KDM if the chain allocation fails, does it matter if we do
1998 * the sync and unload here? It is simpler to do it in every case,
1999 * assuming it doesn't cause problems.
2001 if (cm->cm_data != NULL) {
2002 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2003 dir = BUS_DMASYNC_POSTREAD;
2004 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2005 dir = BUS_DMASYNC_POSTWRITE;
2006 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2007 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2010 cm->cm_targ->completed++;
2011 cm->cm_targ->outstanding--;
2012 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2013 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2015 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2016 if (ccb->csio.bio != NULL)
2017 biotrack(ccb->csio.bio, __func__);
2020 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2021 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2022 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2023 ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2024 cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2025 if (cm->cm_reply != NULL)
2026 mpssas_log_command(cm, MPS_RECOVERY,
2027 "completed timedout cm %p ccb %p during recovery "
2028 "ioc %x scsi %x state %x xfer %u\n",
2029 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2030 rep->SCSIStatus, rep->SCSIState,
2031 le32toh(rep->TransferCount));
2033 mpssas_log_command(cm, MPS_RECOVERY,
2034 "completed timedout cm %p ccb %p during recovery\n",
2036 } else if (cm->cm_targ->tm != NULL) {
2037 if (cm->cm_reply != NULL)
2038 mpssas_log_command(cm, MPS_RECOVERY,
2039 "completed cm %p ccb %p during recovery "
2040 "ioc %x scsi %x state %x xfer %u\n",
2041 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2042 rep->SCSIStatus, rep->SCSIState,
2043 le32toh(rep->TransferCount));
2045 mpssas_log_command(cm, MPS_RECOVERY,
2046 "completed cm %p ccb %p during recovery\n",
2048 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2049 mpssas_log_command(cm, MPS_RECOVERY,
2050 "reset completed cm %p ccb %p\n",
2054 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2056 * We ran into an error after we tried to map the command,
2057 * so we're getting a callback without queueing the command
2058 * to the hardware. So we set the status here, and it will
2059 * be retained below. We'll go through the "fast path",
2060 * because there can be no reply when we haven't actually
2061 * gone out to the hardware.
2063 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2066 * Currently the only error included in the mask is
2067 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2068 * chain frames. We need to freeze the queue until we get
2069 * a command that completed without this error, which will
2070 * hopefully have some chain frames attached that we can
2071 * use. If we wanted to get smarter about it, we would
2072 * only unfreeze the queue in this condition when we're
2073 * sure that we're getting some chain frames back. That's
2074 * probably unnecessary.
2076 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2077 xpt_freeze_simq(sassc->sim, 1);
2078 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2079 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2080 "freezing SIM queue\n");
2085 * If this is a Start Stop Unit command and it was issued by the driver
2086 * during shutdown, decrement the refcount to account for all of the
2087 * commands that were sent. All SSU commands should be completed before
2088 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2091 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2092 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2096 /* Take the fast path to completion */
2097 if (cm->cm_reply == NULL) {
2098 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2099 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2100 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2102 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2103 ccb->csio.scsi_status = SCSI_STATUS_OK;
2105 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2106 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2107 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2108 mps_dprint(sc, MPS_XINFO,
2109 "Unfreezing SIM queue\n");
2114 * There are two scenarios where the status won't be
2115 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2116 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2118 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2120 * Freeze the dev queue so that commands are
2121 * executed in the correct order after error
2124 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2125 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2127 mps_free_command(sc, cm);
2132 mpssas_log_command(cm, MPS_XINFO,
2133 "ioc %x scsi %x state %x xfer %u\n",
2134 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2135 le32toh(rep->TransferCount));
2138 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2139 * Volume if an error occurred (normal I/O retry). Use the original
2140 * CCB, but set a flag that this will be a retry so that it's sent to
2141 * the original volume. Free the command but reuse the CCB.
2143 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2144 mps_free_command(sc, cm);
2145 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2146 mpssas_action_scsiio(sassc, ccb);
2149 ccb->ccb_h.sim_priv.entries[0].field = 0;
2151 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2152 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2153 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2155 case MPI2_IOCSTATUS_SUCCESS:
2156 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2158 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2159 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2160 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2162 /* Completion failed at the transport level. */
2163 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2164 MPI2_SCSI_STATE_TERMINATED)) {
2165 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2169 /* In a modern packetized environment, an autosense failure
2170 * implies that there's not much else that can be done to
2171 * recover the command.
2173 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2174 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2179 * CAM doesn't care about SAS Response Info data, but if this is
2180 * the state check if TLR should be done. If not, clear the
2181 * TLR_bits for the target.
2183 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2184 ((le32toh(rep->ResponseInfo) &
2185 MPI2_SCSI_RI_MASK_REASONCODE) ==
2186 MPS_SCSI_RI_INVALID_FRAME)) {
2187 sc->mapping_table[target_id].TLR_bits =
2188 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2192 * Intentionally override the normal SCSI status reporting
2193 * for these two cases. These are likely to happen in a
2194 * multi-initiator environment, and we want to make sure that
2195 * CAM retries these commands rather than fail them.
2197 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2198 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2199 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2203 /* Handle normal status and sense */
2204 csio->scsi_status = rep->SCSIStatus;
2205 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2206 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2208 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2210 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2211 int sense_len, returned_sense_len;
2213 returned_sense_len = min(le32toh(rep->SenseCount),
2214 sizeof(struct scsi_sense_data));
2215 if (returned_sense_len < ccb->csio.sense_len)
2216 ccb->csio.sense_resid = ccb->csio.sense_len -
2219 ccb->csio.sense_resid = 0;
2221 sense_len = min(returned_sense_len,
2222 ccb->csio.sense_len - ccb->csio.sense_resid);
2223 bzero(&ccb->csio.sense_data,
2224 sizeof(ccb->csio.sense_data));
2225 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2226 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2230 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2231 * and it's page code 0 (Supported Page List), and there is
2232 * inquiry data, and this is for a sequential access device, and
2233 * the device is an SSP target, and TLR is supported by the
2234 * controller, turn the TLR_bits value ON if page 0x90 is
2237 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2238 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2239 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2240 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2241 (csio->data_ptr != NULL) &&
2242 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2243 (sc->control_TLR) &&
2244 (sc->mapping_table[target_id].device_info &
2245 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2246 vpd_list = (struct scsi_vpd_supported_page_list *)
2248 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2249 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2250 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2251 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2252 csio->cdb_io.cdb_bytes[4];
2253 alloc_len -= csio->resid;
2254 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2255 if (vpd_list->list[i] == 0x90) {
2263 * If this is a SATA direct-access end device, mark it so that
2264 * a SCSI StartStopUnit command will be sent to it when the
2265 * driver is being shutdown.
2267 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2268 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2269 (sc->mapping_table[target_id].device_info &
2270 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2271 ((sc->mapping_table[target_id].device_info &
2272 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2273 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2274 target = &sassc->targets[target_id];
2275 target->supports_SSU = TRUE;
2276 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2280 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2281 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2283 * If devinfo is 0 this will be a volume. In that case don't
2284 * tell CAM that the volume is not there. We want volumes to
2285 * be enumerated until they are deleted/removed, not just
2288 if (cm->cm_targ->devinfo == 0)
2289 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2291 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2293 case MPI2_IOCSTATUS_INVALID_SGL:
2294 mps_print_scsiio_cmd(sc, cm);
2295 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2297 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2299 * This is one of the responses that comes back when an I/O
2300 * has been aborted. If it is because of a timeout that we
2301 * initiated, just set the status to CAM_CMD_TIMEOUT.
2302 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2303 * command is the same (it gets retried, subject to the
2304 * retry counter), the only difference is what gets printed
2307 if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2308 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2310 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2312 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2313 /* resid is ignored for this condition */
2315 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2317 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2318 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2320 * These can sometimes be transient transport-related
2321 * errors, and sometimes persistent drive-related errors.
2322 * We used to retry these without decrementing the retry
2323 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2324 * we hit a persistent drive problem that returns one of
2325 * these error codes, we would retry indefinitely. So,
2326 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2327 * count and avoid infinite retries. We're taking the
2328 * potential risk of flagging false failures in the event
2329 * of a topology-related error (e.g. a SAS expander problem
2330 * causes a command addressed to a drive to fail), but
2331 * avoiding getting into an infinite retry loop. However,
2332 * if we get them while were moving a device, we should
2333 * fail the request as 'not there' because the device
2334 * is effectively gone.
2336 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2337 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2339 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2340 mps_dprint(sc, MPS_INFO,
2341 "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2342 mps_describe_table(mps_iocstatus_string,
2343 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2344 target_id, cm->cm_desc.Default.SMID,
2345 le32toh(rep->IOCLogInfo),
2346 (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2347 mps_dprint(sc, MPS_XINFO,
2348 "SCSIStatus %x SCSIState %x xfercount %u\n",
2349 rep->SCSIStatus, rep->SCSIState,
2350 le32toh(rep->TransferCount));
2352 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2353 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2354 case MPI2_IOCSTATUS_INVALID_VPID:
2355 case MPI2_IOCSTATUS_INVALID_FIELD:
2356 case MPI2_IOCSTATUS_INVALID_STATE:
2357 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2358 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2359 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2360 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2361 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2363 mpssas_log_command(cm, MPS_XINFO,
2364 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2365 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2366 rep->SCSIStatus, rep->SCSIState,
2367 le32toh(rep->TransferCount));
2368 csio->resid = cm->cm_length;
2369 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2373 mps_sc_failed_io_info(sc,csio,rep);
2375 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2376 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2377 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2378 mps_dprint(sc, MPS_XINFO, "Command completed, "
2379 "unfreezing SIM queue\n");
2382 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2383 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2384 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2388 * Check to see if we're removing the device. If so, and this is the
2389 * last command on the queue, proceed with the deferred removal of the
2390 * device. Note, for removing a volume, this won't trigger because
2391 * pending_remove_tm will be NULL.
2393 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2394 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2395 cm->cm_targ->pending_remove_tm != NULL) {
2396 mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
2397 mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2398 cm->cm_targ->pending_remove_tm = NULL;
2402 mps_free_command(sc, cm);
2406 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io -- try to convert a READ/WRITE aimed at a
 * WarpDrive/IR volume (DevHandle == sc->DD_dev_handle) into a direct I/O
 * to the underlying physical disk.  Only single-stripe transfers whose
 * LBA range fits inside the volume are converted; everything else is left
 * addressed to the volume DevHandle.  6-, 10-, 12- and 16-byte CDBs are
 * handled in separate branches because their LBA field layouts differ.
 *
 * NOTE(review): this excerpt is missing interior lines (declarations of
 * virtLBA, closing braces, else-arms); comments below describe only the
 * logic that is visible here.
 */
2408 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2410 pMpi2SCSIIORequest_t pIO_req;
2411 struct mps_softc *sc = sassc->sc;
2413 uint32_t physLBA, stripe_offset, stripe_unit;
2414 uint32_t io_size, column;
2415 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2418 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2419 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2420 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2421 * bit different than the 10/16 CDBs, handle them separately.
2423 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2424 CDB = pIO_req->CDB.CDB32;
2427 * Handle 6 byte CDBs.
2429 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2430 (CDB[0] == WRITE_6))) {
2432 * Get the transfer size in blocks.
2434 io_size = (cm->cm_length >> sc->DD_block_exponent);
2437 * Get virtual LBA given in the CDB.
/* READ(6)/WRITE(6) carry a 21-bit LBA: 5 bits of CDB[1] plus CDB[2..3]. */
2439 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2440 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2443 * Check that LBA range for I/O does not exceed volume's
2446 if ((virtLBA + (uint64_t)io_size - 1) <=
2449 * Check if the I/O crosses a stripe boundary. If not,
2450 * translate the virtual LBA to a physical LBA and set
2451 * the DevHandle for the PhysDisk to be used. If it
2452 * does cross a boundary, do normal I/O. To get the
2453 * right DevHandle to use, get the map number for the
2454 * column, then use that map number to look up the
2455 * DevHandle of the PhysDisk.
/* DD_stripe_size is a power of two, so masking gives offset-in-stripe. */
2457 stripe_offset = (uint32_t)virtLBA &
2458 (sc->DD_stripe_size - 1);
2459 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2460 physLBA = (uint32_t)virtLBA >>
2461 sc->DD_stripe_exponent;
2462 stripe_unit = physLBA / sc->DD_num_phys_disks;
2463 column = physLBA % sc->DD_num_phys_disks;
2464 pIO_req->DevHandle =
2465 htole16(sc->DD_column_map[column].dev_handle);
2466 /* ???? Is this endian safe*/
2467 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the CDB's LBA bytes in place with the physical-disk LBA. */
2470 physLBA = (stripe_unit <<
2471 sc->DD_stripe_exponent) + stripe_offset;
2472 ptrLBA = &pIO_req->CDB.CDB32[1];
2473 physLBA_byte = (uint8_t)(physLBA >> 16);
2474 *ptrLBA = physLBA_byte;
2475 ptrLBA = &pIO_req->CDB.CDB32[2];
2476 physLBA_byte = (uint8_t)(physLBA >> 8);
2477 *ptrLBA = physLBA_byte;
2478 ptrLBA = &pIO_req->CDB.CDB32[3];
2479 physLBA_byte = (uint8_t)physLBA;
2480 *ptrLBA = physLBA_byte;
2483 * Set flag that Direct Drive I/O is
2486 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2493 * Handle 10, 12 or 16 byte CDBs.
2495 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2496 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2497 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2498 (CDB[0] == WRITE_12))) {
2500 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2501 * are 0. If not, this is accessing beyond 2TB so handle it in
2502 * the else section. 10-byte and 12-byte CDB's are OK.
2503 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2504 * ready to accept 12byte CDB for Direct IOs.
2506 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2507 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2508 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2510 * Get the transfer size in blocks.
2512 io_size = (cm->cm_length >> sc->DD_block_exponent);
2515 * Get virtual LBA. Point to correct lower 4 bytes of
2516 * LBA in the CDB depending on command.
/* LBA starts at CDB[2] for 10/12-byte CDBs, CDB[6] for 16-byte. */
2518 lba_idx = ((CDB[0] == READ_12) ||
2519 (CDB[0] == WRITE_12) ||
2520 (CDB[0] == READ_10) ||
2521 (CDB[0] == WRITE_10))? 2 : 6;
2522 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2523 ((uint64_t)CDB[lba_idx + 1] << 16) |
2524 ((uint64_t)CDB[lba_idx + 2] << 8) |
2525 (uint64_t)CDB[lba_idx + 3];
2528 * Check that LBA range for I/O does not exceed volume's
2531 if ((virtLBA + (uint64_t)io_size - 1) <=
2534 * Check if the I/O crosses a stripe boundary.
2535 * If not, translate the virtual LBA to a
2536 * physical LBA and set the DevHandle for the
2537 * PhysDisk to be used. If it does cross a
2538 * boundary, do normal I/O. To get the right
2539 * DevHandle to use, get the map number for the
2540 * column, then use that map number to look up
2541 * the DevHandle of the PhysDisk.
2543 stripe_offset = (uint32_t)virtLBA &
2544 (sc->DD_stripe_size - 1);
2545 if ((stripe_offset + io_size) <=
2546 sc->DD_stripe_size) {
2547 physLBA = (uint32_t)virtLBA >>
2548 sc->DD_stripe_exponent;
2549 stripe_unit = physLBA /
2550 sc->DD_num_phys_disks;
2552 sc->DD_num_phys_disks;
2553 pIO_req->DevHandle =
2554 htole16(sc->DD_column_map[column].
2556 cm->cm_desc.SCSIIO.DevHandle =
2559 physLBA = (stripe_unit <<
2560 sc->DD_stripe_exponent) +
2563 &pIO_req->CDB.CDB32[lba_idx];
2564 physLBA_byte = (uint8_t)(physLBA >> 24);
2565 *ptrLBA = physLBA_byte;
2567 &pIO_req->CDB.CDB32[lba_idx + 1];
2568 physLBA_byte = (uint8_t)(physLBA >> 16);
2569 *ptrLBA = physLBA_byte;
2571 &pIO_req->CDB.CDB32[lba_idx + 2];
2572 physLBA_byte = (uint8_t)(physLBA >> 8);
2573 *ptrLBA = physLBA_byte;
2575 &pIO_req->CDB.CDB32[lba_idx + 3];
2576 physLBA_byte = (uint8_t)physLBA;
2577 *ptrLBA = physLBA_byte;
2580 * Set flag that Direct Drive I/O is
2583 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2588 * 16-byte CDB and the upper 4 bytes of the CDB are not
2589 * 0. Get the transfer size in blocks.
2591 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * NOTE(review): "<< 54" below looks wrong -- assembling a 64-bit LBA
 * from CDB[2..9] should shift byte 2 by 56 (56,48,40,...,0).  Upstream
 * FreeBSD uses 56 here; confirm against the in-tree mps driver before
 * relying on this path.
 */
2596 virtLBA = ((uint64_t)CDB[2] << 54) |
2597 ((uint64_t)CDB[3] << 48) |
2598 ((uint64_t)CDB[4] << 40) |
2599 ((uint64_t)CDB[5] << 32) |
2600 ((uint64_t)CDB[6] << 24) |
2601 ((uint64_t)CDB[7] << 16) |
2602 ((uint64_t)CDB[8] << 8) |
2606 * Check that LBA range for I/O does not exceed volume's
2609 if ((virtLBA + (uint64_t)io_size - 1) <=
2612 * Check if the I/O crosses a stripe boundary.
2613 * If not, translate the virtual LBA to a
2614 * physical LBA and set the DevHandle for the
2615 * PhysDisk to be used. If it does cross a
2616 * boundary, do normal I/O. To get the right
2617 * DevHandle to use, get the map number for the
2618 * column, then use that map number to look up
2619 * the DevHandle of the PhysDisk.
2621 stripe_offset = (uint32_t)virtLBA &
2622 (sc->DD_stripe_size - 1);
2623 if ((stripe_offset + io_size) <=
2624 sc->DD_stripe_size) {
2625 physLBA = (uint32_t)(virtLBA >>
2626 sc->DD_stripe_exponent);
2627 stripe_unit = physLBA /
2628 sc->DD_num_phys_disks;
2630 sc->DD_num_phys_disks;
2631 pIO_req->DevHandle =
2632 htole16(sc->DD_column_map[column].
2634 cm->cm_desc.SCSIIO.DevHandle =
2637 physLBA = (stripe_unit <<
2638 sc->DD_stripe_exponent) +
2642 * Set upper 4 bytes of LBA to 0. We
2643 * assume that the phys disks are less
2644 * than 2 TB's in size. Then, set the
2647 pIO_req->CDB.CDB32[2] = 0;
2648 pIO_req->CDB.CDB32[3] = 0;
2649 pIO_req->CDB.CDB32[4] = 0;
2650 pIO_req->CDB.CDB32[5] = 0;
2651 ptrLBA = &pIO_req->CDB.CDB32[6];
2652 physLBA_byte = (uint8_t)(physLBA >> 24);
2653 *ptrLBA = physLBA_byte;
2654 ptrLBA = &pIO_req->CDB.CDB32[7];
2655 physLBA_byte = (uint8_t)(physLBA >> 16);
2656 *ptrLBA = physLBA_byte;
2657 ptrLBA = &pIO_req->CDB.CDB32[8];
2658 physLBA_byte = (uint8_t)(physLBA >> 8);
2659 *ptrLBA = physLBA_byte;
2660 ptrLBA = &pIO_req->CDB.CDB32[9];
2661 physLBA_byte = (uint8_t)physLBA;
2662 *ptrLBA = physLBA_byte;
2665 * Set flag that Direct Drive I/O is
2668 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
/*
 * Completion handler for an SMP passthrough command.  Translates the MPI2
 * reply (IOCStatus/SASStatus) plus the SMP function result byte into a CAM
 * CCB status, then unmaps the bidirectional DMA buffers and frees the
 * command.  NOTE(review): excerpt is missing lines ("bail:" label, local
 * declarations, xpt_done() tail -- confirm against upstream).
 */
2676 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2678 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2679 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2683 ccb = cm->cm_complete_data;
2686 * Currently there should be no way we can hit this case. It only
2687 * happens when we have a failure to allocate chain frames, and SMP
2688 * commands require two S/G elements only. That should be handled
2689 * in the standard request size.
2691 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2692 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2693 __func__, cm->cm_flags);
2694 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2698 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2700 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2701 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Recover the target SAS address from the request for logging below. */
2705 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2706 sasaddr = le32toh(req->SASAddress.Low);
2707 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2709 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2710 MPI2_IOCSTATUS_SUCCESS ||
2711 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2712 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2713 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2714 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2718 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2719 "%#jx completed successfully\n", __func__,
2720 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the SMP function result code. */
2722 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2723 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2725 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2729 * We sync in both directions because we had DMAs in the S/G list
2730 * in both directions.
2732 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2733 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2734 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2735 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Resolves the CCB's virtual request/response buffers (rejecting physical
 * addresses and multi-segment S/G lists), fills in an
 * MPI2_SMP_PASSTHROUGH_REQUEST, and maps both buffers with a single
 * two-iovec uio so busdma produces exactly one segment each for request
 * and response.  Completion is handled by mpssas_smpio_complete().
 * NOTE(review): excerpt is missing lines (sc assignment, xpt_done() calls
 * on the error paths, closing braces) -- comments cover visible logic only.
 */
2740 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2742 struct mps_command *cm;
2743 uint8_t *request, *response;
2744 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2745 struct mps_softc *sc;
2752 * XXX We don't yet support physical addresses here.
2754 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2755 case CAM_DATA_PADDR:
2756 case CAM_DATA_SG_PADDR:
2757 mps_dprint(sc, MPS_ERROR,
2758 "%s: physical addresses not supported\n", __func__);
2759 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2764 * The chip does not support more than one buffer for the
2765 * request or response.
2767 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2768 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2769 mps_dprint(sc, MPS_ERROR,
2770 "%s: multiple request or response "
2771 "buffer segments not supported for SMP\n",
2773 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2779 * The CAM_SCATTER_VALID flag was originally implemented
2780 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2781 * We have two. So, just take that flag to mean that we
2782 * might have S/G lists, and look at the S/G segment count
2783 * to figure out whether that is the case for each individual
2786 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2787 bus_dma_segment_t *req_sg;
2789 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2790 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2792 request = ccb->smpio.smp_request;
2794 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2795 bus_dma_segment_t *rsp_sg;
2797 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2798 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2800 response = ccb->smpio.smp_response;
2802 case CAM_DATA_VADDR:
2803 request = ccb->smpio.smp_request;
2804 response = ccb->smpio.smp_response;
2807 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2812 cm = mps_alloc_command(sc);
2814 mps_dprint(sc, MPS_ERROR,
2815 "%s: cannot allocate command\n", __func__);
2816 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2821 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2822 bzero(req, sizeof(*req));
2823 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2825 /* Allow the chip to use any route to this SAS address. */
2826 req->PhysicalPort = 0xff;
/* Lengths and addresses in the request are little-endian (chip format). */
2828 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2830 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2832 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2833 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2835 mpi_init_sge(cm, req, &req->SGL);
2838 * Set up a uio to pass into mps_map_command(). This allows us to
2839 * do one map command, and one busdma call in there.
2841 cm->cm_uio.uio_iov = cm->cm_iovec;
2842 cm->cm_uio.uio_iovcnt = 2;
2843 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2846 * The read/write flag isn't used by busdma, but set it just in
2847 * case. This isn't exactly accurate, either, since we're going in
2850 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outbound SMP request, iovec 1 = inbound SMP response. */
2852 cm->cm_iovec[0].iov_base = request;
2853 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2854 cm->cm_iovec[1].iov_base = response;
2855 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2857 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2858 cm->cm_iovec[1].iov_len;
2861 * Trigger a warning message in mps_data_cb() for the user if we
2862 * wind up exceeding two S/G segments. The chip expects one
2863 * segment for the request and another for the response.
2865 cm->cm_max_segs = 2;
2867 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2868 cm->cm_complete = mpssas_smpio_complete;
2869 cm->cm_complete_data = ccb;
2872 * Tell the mapping code that we're using a uio, and that this is
2873 * an SMP passthrough request. There is a little special-case
2874 * logic there (in mps_data_cb()) to handle the bidirectional
2877 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2878 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2880 /* The chip data format is little endian. */
2881 req->SASAddress.High = htole32(sasaddr >> 32);
2882 req->SASAddress.Low = htole32(sasaddr);
2885 * XXX Note that we don't have a timeout/abort mechanism here.
2886 * From the manual, it looks like task management requests only
2887 * work for SCSI IO and SATA passthrough requests. We may need to
2888 * have a mechanism to retry requests in the event of a chip reset
2889 * at least. Hopefully the chip will insure that any errors short
2890 * of that are relayed back to the driver.
/* EINPROGRESS just means busdma deferred the load; not a failure. */
2892 error = mps_map_command(sc, cm);
2893 if ((error != 0) && (error != EINPROGRESS)) {
2894 mps_dprint(sc, MPS_ERROR,
2895 "%s: error %d returned from mps_map_command()\n",
2903 mps_free_command(sc, cm);
2904 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO dispatcher: determine the SAS address the SMP request should
 * be routed to, then hand off to mpssas_send_smpcmd().  Preference order:
 * the target's own embedded SMP target, otherwise the parent (expander)
 * device's SAS address.  Fails the CCB with CAM_DEV_NOT_THERE when no
 * usable address can be found.  NOTE(review): excerpt is missing lines
 * (sc assignment, xpt_done() calls, "if (sasaddr == 0)" guards implied by
 * the surviving comments) -- comments below cover visible logic only.
 */
2911 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2913 struct mps_softc *sc;
2914 struct mpssas_target *targ;
2915 uint64_t sasaddr = 0;
2920 * Make sure the target exists.
2922 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2923 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2924 targ = &sassc->targets[ccb->ccb_h.target_id];
2925 if (targ->handle == 0x0) {
2926 mps_dprint(sc, MPS_ERROR,
2927 "%s: target %d does not exist!\n", __func__,
2928 ccb->ccb_h.target_id);
2929 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2935 * If this device has an embedded SMP target, we'll talk to it
2937 * figure out what the expander's address is.
2939 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2940 sasaddr = targ->sasaddr;
2943 * If we don't have a SAS address for the expander yet, try
2944 * grabbing it from the page 0x83 information cached in the
2945 * transport layer for this target. LSI expanders report the
2946 * expander SAS address as the port-associated SAS address in
2947 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2950 * XXX KDM disable this for now, but leave it commented out so that
2951 * it is obvious that this is another possible way to get the SAS
2954 * The parent handle method below is a little more reliable, and
2955 * the other benefit is that it works for devices other than SES
2956 * devices. So you can send a SMP request to a da(4) device and it
2957 * will get routed to the expander that device is attached to.
2958 * (Assuming the da(4) device doesn't contain an SMP target...)
2962 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2966 * If we still don't have a SAS address for the expander, look for
2967 * the parent device of this device, which is probably the expander.
2970 #ifdef OLD_MPS_PROBE
2971 struct mpssas_target *parent_target;
2974 if (targ->parent_handle == 0x0) {
2975 mps_dprint(sc, MPS_ERROR,
2976 "%s: handle %d does not have a valid "
2977 "parent handle!\n", __func__, targ->handle);
2978 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Legacy probe path: resolve the parent via the target table. */
2981 #ifdef OLD_MPS_PROBE
2982 parent_target = mpssas_find_target_by_handle(sassc, 0,
2983 targ->parent_handle);
2985 if (parent_target == NULL) {
2986 mps_dprint(sc, MPS_ERROR,
2987 "%s: handle %d does not have a valid "
2988 "parent target!\n", __func__, targ->handle);
2989 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2993 if ((parent_target->devinfo &
2994 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2995 mps_dprint(sc, MPS_ERROR,
2996 "%s: handle %d parent %d does not "
2997 "have an SMP target!\n", __func__,
2998 targ->handle, parent_target->handle);
2999 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3004 sasaddr = parent_target->sasaddr;
3005 #else /* OLD_MPS_PROBE */
/* Current path: parent devinfo/sasaddr are cached on the target itself. */
3006 if ((targ->parent_devinfo &
3007 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3008 mps_dprint(sc, MPS_ERROR,
3009 "%s: handle %d parent %d does not "
3010 "have an SMP target!\n", __func__,
3011 targ->handle, targ->parent_handle);
3012 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3016 if (targ->parent_sasaddr == 0x0) {
3017 mps_dprint(sc, MPS_ERROR,
3018 "%s: handle %d parent handle %d does "
3019 "not have a valid SAS address!\n",
3020 __func__, targ->handle, targ->parent_handle);
3021 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3025 sasaddr = targ->parent_sasaddr;
3026 #endif /* OLD_MPS_PROBE */
3031 mps_dprint(sc, MPS_INFO,
3032 "%s: unable to find SAS address for handle %d\n",
3033 __func__, targ->handle);
3034 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3037 mpssas_send_smpcmd(sassc, ccb, sasaddr);
/*
 * XPT_RESET_DEV handler: allocate a task-management command and issue a
 * Target Reset (hard link reset) for the addressed target.  The devq is
 * frozen via mpssas_prepare_for_tm() until mpssas_resetdev_complete()
 * runs.  NOTE(review): excerpt is missing lines -- 'sc' is used before a
 * visible assignment (presumably "sc = sassc->sc" on a missing line) and
 * the NULL check for mpssas_alloc_tm() is likewise not visible; confirm
 * against upstream.
 */
3047 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3049 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3050 struct mps_softc *sc;
3051 struct mps_command *tm;
3052 struct mpssas_target *targ;
3054 MPS_FUNCTRACE(sassc->sc);
3055 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3057 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3058 ("Target %d out of bounds in XPT_RESET_DEV\n",
3059 ccb->ccb_h.target_id));
3061 tm = mpssas_alloc_tm(sc);
3063 mps_dprint(sc, MPS_ERROR,
3064 "command alloc failure in mpssas_action_resetdev\n");
3065 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3070 targ = &sassc->targets[ccb->ccb_h.target_id];
3071 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3072 req->DevHandle = htole16(targ->handle);
3073 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3075 /* SAS Hard Link Reset / SATA Link Reset */
3076 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3079 tm->cm_complete = mpssas_resetdev_complete;
3080 tm->cm_complete_data = ccb;
/* Freeze the devq and flag the target INRESET before submitting the TM. */
3083 mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3084 mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Maps the task-management reply's
 * ResponseCode onto the CCB status, announces AC_SENT_BDR to CAM on
 * success, and releases the TM command (which also unfreezes the devq).
 * NOTE(review): excerpt is missing lines (ccb declaration, NULL-reply
 * handling, xpt_done(), "bailout:" label) -- confirm against upstream.
 */
3088 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3090 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3094 mtx_assert(&sc->mps_mtx, MA_OWNED);
3096 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3097 ccb = tm->cm_complete_data;
3100 * Currently there should be no way we can hit this case. It only
3101 * happens when we have a failure to allocate chain frames, and
3102 * task management commands don't have S/G lists.
3104 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3105 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3107 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3109 mps_dprint(sc, MPS_ERROR,
3110 "%s: cm_flags = %#x for reset of handle %#04x! "
3111 "This should not happen!\n", __func__, tm->cm_flags,
3113 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3117 mps_dprint(sc, MPS_XINFO,
3118 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3119 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3121 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3122 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Tell CAM a bus-device-reset was sent so peripherals can re-negotiate. */
3123 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3127 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3131 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, called when interrupts are unavailable
 * (e.g. while dumping after a panic).  Disables MPS_TRACE debugging
 * first so per-command chatter does not drown the polled path, then
 * services the hardware directly via mps_intr_locked().
 */
3136 mpssas_poll(struct cam_sim *sim)
3138 struct mpssas_softc *sassc;
3140 sassc = cam_sim_softc(sim);
3142 if (sassc->sc->mps_debug & MPS_TRACE) {
3143 /* frequent debug messages during a panic just slow
3144 * everything down too much.
3146 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3147 sassc->sc->mps_debug &= ~MPS_TRACE;
3150 mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  For AC_ADVINFO_CHANGED with buftype
 * CDAI_TYPE_RCAPLONG, re-reads the long READ CAPACITY data for the
 * affected LUN (creating a per-LUN record if none exists) and records
 * whether the LUN is EEDP (protection-information) formatted, along with
 * its block size.  NOTE(review): excerpt is missing lines (the `arg`
 * parameter and locals such as found_lun/lunid, malloc flags, break
 * statements, closing braces) -- comments cover visible logic only.
 */
3154 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3157 struct mps_softc *sc;
3159 sc = (struct mps_softc *)callback_arg;
3162 case AC_ADVINFO_CHANGED: {
3163 struct mpssas_target *target;
3164 struct mpssas_softc *sassc;
3165 struct scsi_read_capacity_data_long rcap_buf;
3166 struct ccb_dev_advinfo cdai;
3167 struct mpssas_lun *lun;
3172 buftype = (uintptr_t)arg;
3178 * We're only interested in read capacity data changes.
3180 if (buftype != CDAI_TYPE_RCAPLONG)
3184 * We should have a handle for this, but check to make sure.
3186 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3187 ("Target %d out of bounds in mpssas_async\n",
3188 xpt_path_target_id(path)));
3189 target = &sassc->targets[xpt_path_target_id(path)];
3190 if (target->handle == 0)
3193 lunid = xpt_path_lun_id(path);
/* Look for an existing record of this LUN on the target. */
3195 SLIST_FOREACH(lun, &target->luns, lun_link) {
3196 if (lun->lun_id == lunid) {
3202 if (found_lun == 0) {
3203 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3206 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3207 "LUN for EEDP support.\n");
3210 lun->lun_id = lunid;
3211 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Synchronously fetch the cached RCAPLONG data via XPT_DEV_ADVINFO. */
3214 bzero(&rcap_buf, sizeof(rcap_buf));
3215 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3216 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3217 cdai.ccb_h.flags = CAM_DIR_IN;
3218 cdai.buftype = CDAI_TYPE_RCAPLONG;
3219 cdai.flags = CDAI_FLAG_NONE;
3220 cdai.bufsiz = sizeof(rcap_buf);
3221 cdai.buf = (uint8_t *)&rcap_buf;
3222 xpt_action((union ccb *)&cdai);
3223 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3224 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN + protection type decide whether EEDP is usable. */
3227 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3228 && (rcap_buf.prot & SRC16_PROT_EN)) {
3229 switch (rcap_buf.prot & SRC16_P_TYPE) {
3232 lun->eedp_formatted = TRUE;
3233 lun->eedp_block_size =
3234 scsi_4btoul(rcap_buf.length);
3238 lun->eedp_formatted = FALSE;
3239 lun->eedp_block_size = 0;
3243 lun->eedp_formatted = FALSE;
3244 lun->eedp_block_size = 0;
3254 * Set the INRESET flag for this target so that no I/O will be sent to
3255 * the target until the reset has completed. If an I/O request does
3256 * happen, the devq will be frozen. The CCB holds the path which is
3257 * used to release the devq. The devq is released and the CCB is freed
3258 * when the TM completes.
/*
 * NOTE(review): excerpt is missing lines -- the NULL check for
 * xpt_alloc_ccb_nowait() and the assignment of the ccb into the tm are
 * presumably on the omitted lines; confirm against upstream.
 */
3261 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3262 struct mpssas_target *target, lun_id_t lun_id)
3267 ccb = xpt_alloc_ccb_nowait();
3269 path_id = cam_sim_path(sc->sassc->sim);
3270 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3271 target->tid, lun_id) != CAM_REQ_CMP) {
3275 tm->cm_targ = target;
3276 target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Kick off SAS topology discovery: raise wait_for_port_enable so the simq
 * stays frozen until discovery finishes, then send the Port Enable request.
 */
3282 mpssas_startup(struct mps_softc *sc)
3286 * Send the port enable message and set the wait_for_port_enable flag.
3287 * This flag helps to keep the simq frozen until all discovery events
3290 sc->wait_for_port_enable = 1;
3291 mpssas_send_portenable(sc);
/*
 * Build and submit an MPI2 Port Enable request; completion is handled by
 * mpssas_portenable_complete().  NOTE(review): the return statements
 * (e.g. an error return when mps_alloc_command() fails) are on lines
 * omitted from this excerpt.
 */
3296 mpssas_send_portenable(struct mps_softc *sc)
3298 MPI2_PORT_ENABLE_REQUEST *request;
3299 struct mps_command *cm;
3303 if ((cm = mps_alloc_command(sc)) == NULL)
3305 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3306 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3307 request->MsgFlags = 0;
3309 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3310 cm->cm_complete = mpssas_portenable_complete;
3314 mps_map_command(sc, cm);
3315 mps_dprint(sc, MPS_XINFO,
3316 "mps_send_portenable finished cm %p req %p complete %p\n",
3317 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the Port Enable request.  Logs failures, pulls
 * WarpDrive configuration pages before the bus scan so hidden disks can be
 * removed from the target array, then clears wait_for_port_enable, wakes
 * any waiter, and drops the startup refcount so the bus rescan can proceed.
 */
3322 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3324 MPI2_PORT_ENABLE_REPLY *reply;
3325 struct mpssas_softc *sassc;
3331 * Currently there should be no way we can hit this case. It only
3332 * happens when we have a failure to allocate chain frames, and
3333 * port enable commands don't have S/G lists.
3335 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3336 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3337 "This should not happen!\n", __func__, cm->cm_flags);
3340 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3342 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied before the byte-swap here --
 * le16toh(x & MASK) differs from (le16toh(x) & MASK) on big-endian
 * hosts.  Other status checks in this file swap first; confirm against
 * upstream whether this should be (le16toh(reply->IOCStatus) & MASK).
 */
3343 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3344 MPI2_IOCSTATUS_SUCCESS)
3345 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3347 mps_free_command(sc, cm);
3350 * Get WarpDrive info after discovery is complete but before the scan
3351 * starts. At this point, all devices are ready to be exposed to the
3352 * OS. If devices should be hidden instead, take them out of the
3353 * 'targets' array before the scan. The devinfo for a disk will have
3354 * some info and a volume's will be 0. Use that to remove disks.
3356 mps_wd_config_pages(sc);
3359 * Done waiting for port enable to complete. Decrement the refcount.
3360 * If refcount is 0, discovery is complete and a rescan of the bus can
3361 * take place. Since the simq was explicitly frozen before port
3362 * enable, it must be explicitly released here to keep the
3363 * freeze/release count in sync.
3365 sc->wait_for_port_enable = 0;
3366 sc->port_enable_complete = 1;
3367 wakeup(&sc->port_enable_complete);
3368 mpssas_startup_decrement(sassc);
/*
 * Check whether a target id appears in the driver's comma-separated
 * exclude_ids list (hint-supplied).  The return statements are on lines
 * omitted from this excerpt; presumably nonzero means "excluded".
 * NOTE(review): strsep() writes NUL bytes into sc->exclude_ids, so the
 * list is consumed destructively on each call -- confirm callers only
 * rely on this once, or that the buffer is re-initialized.
 */
3372 mpssas_check_id(struct mpssas_softc *sassc, int id)
3374 struct mps_softc *sc = sassc->sc;
3378 ids = &sc->exclude_ids[0];
3379 while((name = strsep(&ids, ",")) != NULL) {
3380 if (name[0] == '\0')
3382 if (strtol(name, NULL, 0) == (long)id)
3390 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3392 struct mpssas_softc *sassc;
3393 struct mpssas_lun *lun, *lun_tmp;
3394 struct mpssas_target *targ;
3399 * The number of targets is based on IOC Facts, so free all of
3400 * the allocated LUNs for each target and then the target buffer
3403 for (i=0; i< maxtargets; i++) {
3404 targ = &sassc->targets[i];
3405 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3409 free(sassc->targets, M_MPT2);
3411 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3412 M_MPT2, M_WAITOK|M_ZERO);