2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright (c) 2011 LSI Corp.
28 * All rights reserved.
30 * Redistribution and use in source and binary forms, with or without
31 * modification, are permitted provided that the following conditions
33 * 1. Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * 2. Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in the
37 * documentation and/or other materials provided with the distribution.
39 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
40 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
43 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
44 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
45 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
46 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
47 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
48 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * LSI MPT-Fusion Host Adapter FreeBSD
56 #include <sys/cdefs.h>
57 __FBSDID("$FreeBSD$");
59 /* Communications core for LSI MPT2 */
61 /* TODO Move headers to mpsvar */
62 #include <sys/types.h>
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/selinfo.h>
67 #include <sys/module.h>
71 #include <sys/malloc.h>
73 #include <sys/sysctl.h>
74 #include <sys/endian.h>
75 #include <sys/queue.h>
76 #include <sys/kthread.h>
77 #include <sys/taskqueue.h>
80 #include <machine/bus.h>
81 #include <machine/resource.h>
84 #include <machine/stdarg.h>
87 #include <cam/cam_ccb.h>
88 #include <cam/cam_xpt.h>
89 #include <cam/cam_debug.h>
90 #include <cam/cam_sim.h>
91 #include <cam/cam_xpt_sim.h>
92 #include <cam/cam_xpt_periph.h>
93 #include <cam/cam_periph.h>
94 #include <cam/scsi/scsi_all.h>
95 #include <cam/scsi/scsi_message.h>
96 #if __FreeBSD_version >= 900026
97 #include <cam/scsi/smp_all.h>
100 #include <dev/mps/mpi/mpi2_type.h>
101 #include <dev/mps/mpi/mpi2.h>
102 #include <dev/mps/mpi/mpi2_ioc.h>
103 #include <dev/mps/mpi/mpi2_sas.h>
104 #include <dev/mps/mpi/mpi2_cnfg.h>
105 #include <dev/mps/mpi/mpi2_init.h>
106 #include <dev/mps/mpi/mpi2_tool.h>
107 #include <dev/mps/mps_ioctl.h>
108 #include <dev/mps/mpsvar.h>
109 #include <dev/mps/mps_table.h>
110 #include <dev/mps/mps_sas.h>
112 #define MPSSAS_DISCOVERY_TIMEOUT 20
113 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
116 * static array to check SCSI OpCode for EEDP protection bits
118 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
119 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
120 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
121 static uint8_t op_code_prot[256] = {
122 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
123 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
124 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
125 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
126 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
127 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
128 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
131 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
132 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
140 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
142 static struct mpssas_target * mpssas_find_target_by_handle(struct mpssas_softc *, int, uint16_t);
143 static void mpssas_discovery_timeout(void *data);
144 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
145 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
146 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
147 static void mpssas_poll(struct cam_sim *sim);
148 static void mpssas_scsiio_timeout(void *data);
149 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
150 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
151 struct mps_command *cm, union ccb *ccb);
152 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
153 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
154 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
155 #if __FreeBSD_version >= 900026
156 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
157 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
159 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
160 #endif //FreeBSD_version >= 900026
161 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
162 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
163 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
164 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
165 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
166 static void mpssas_scanner_thread(void *arg);
167 #if __FreeBSD_version >= 1000006
168 static void mpssas_async(void *callback_arg, uint32_t code,
169 struct cam_path *path, void *arg);
171 static void mpssas_check_eedp(struct mpssas_softc *sassc);
172 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
174 static int mpssas_send_portenable(struct mps_softc *sc);
175 static void mpssas_portenable_complete(struct mps_softc *sc,
176 struct mps_command *cm);
178 static struct mpssas_target *
179 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
181 struct mpssas_target *target;
184 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
185 target = &sassc->targets[i];
186 if (target->handle == handle)
193 /* we need to freeze the simq during attach and diag reset, to avoid failing
194 * commands before device handles have been found by discovery. Since
195 * discovery involves reading config pages and possibly sending commands,
196 * discovery actions may continue even after we receive the end of discovery
197 * event, so refcount discovery actions instead of assuming we can unfreeze
198 * the simq when we get the event.
201 mpssas_startup_increment(struct mpssas_softc *sassc)
203 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
204 if (sassc->startup_refcount++ == 0) {
205 /* just starting, freeze the simq */
206 mps_dprint(sassc->sc, MPS_INFO,
207 "%s freezing simq\n", __func__);
208 xpt_freeze_simq(sassc->sim, 1);
210 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
211 sassc->startup_refcount);
216 mpssas_startup_decrement(struct mpssas_softc *sassc)
218 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
219 if (--sassc->startup_refcount == 0) {
220 /* finished all discovery-related actions, release
221 * the simq and rescan for the latest topology.
223 mps_dprint(sassc->sc, MPS_INFO,
224 "%s releasing simq\n", __func__);
225 sassc->flags &= ~MPSSAS_IN_STARTUP;
226 xpt_release_simq(sassc->sim, 1);
227 mpssas_rescan_target(sassc->sc, NULL);
229 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
230 sassc->startup_refcount);
234 /* LSI's firmware requires us to stop sending commands when we're doing task
235 * management, so refcount the TMs and keep the simq frozen when any are in
239 mpssas_alloc_tm(struct mps_softc *sc)
241 struct mps_command *tm;
243 tm = mps_alloc_high_priority_command(sc);
245 if (sc->sassc->tm_count++ == 0) {
246 mps_printf(sc, "%s freezing simq\n", __func__);
247 xpt_freeze_simq(sc->sassc->sim, 1);
249 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
250 sc->sassc->tm_count);
256 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
261 /* if there are no TMs in use, we can release the simq. We use our
262 * own refcount so that it's easier for a diag reset to cleanup and
265 if (--sc->sassc->tm_count == 0) {
266 mps_printf(sc, "%s releasing simq\n", __func__);
267 xpt_release_simq(sc->sassc->sim, 1);
269 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
270 sc->sassc->tm_count);
272 mps_free_high_priority_command(sc, tm);
277 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
279 struct mpssas_softc *sassc = sc->sassc;
281 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = xpt_alloc_ccb_nowait();
295 mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
299 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
300 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
306 /* XXX Hardwired to scan the bus for now */
307 ccb->ccb_h.func_code = XPT_SCAN_BUS;
308 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
309 mpssas_rescan(sassc, ccb);
313 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
323 sbuf_new(&sb, str, sizeof(str), 0);
327 if (cm->cm_ccb != NULL) {
328 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
330 sbuf_cat(&sb, path_str);
331 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
332 scsi_command_string(&cm->cm_ccb->csio, &sb);
333 sbuf_printf(&sb, "length %d ",
334 cm->cm_ccb->csio.dxfer_len);
338 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
339 cam_sim_name(cm->cm_sc->sassc->sim),
340 cam_sim_unit(cm->cm_sc->sassc->sim),
341 cam_sim_bus(cm->cm_sc->sassc->sim),
342 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
346 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
347 sbuf_vprintf(&sb, fmt, ap);
349 printf("%s", sbuf_data(&sb));
355 mpssas_lost_target(struct mps_softc *sc, struct mpssas_target *targ)
357 struct mpssas_softc *sassc = sc->sassc;
358 path_id_t pathid = cam_sim_path(sassc->sim);
359 struct cam_path *path;
361 mps_printf(sc, "%s targetid %u\n", __func__, targ->tid);
362 if (xpt_create_path(&path, NULL, pathid, targ->tid, 0) != CAM_REQ_CMP) {
363 mps_printf(sc, "unable to create path for lost target %d\n",
368 xpt_async(AC_LOST_DEVICE, path, NULL);
373 * The MPT2 firmware performs debounce on the link to avoid transient link
374 * errors and false removals. When it does decide that link has been lost
375 * and a device need to go away, it expects that the host will perform a
376 * target reset and then an op remove. The reset has the side-effect of
377 * aborting any outstanding requests for the device, which is required for
378 * the op-remove to succeed. It's not clear if the host should check for
379 * the device coming back alive after the reset.
382 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
384 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
385 struct mps_softc *sc;
386 struct mps_command *cm;
387 struct mpssas_target *targ = NULL;
389 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
392 * If this is a WD controller, determine if the disk should be exposed
393 * to the OS or not. If disk should be exposed, return from this
394 * function without doing anything.
397 if ((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE) && (sc->WD_hide_expose ==
398 MPS_WD_EXPOSE_ALWAYS)) {
402 targ = mpssas_find_target_by_handle(sassc, 0, handle);
404 /* FIXME: what is the action? */
405 /* We don't know about this device? */
406 printf("%s: invalid handle 0x%x \n", __func__, handle);
410 targ->flags |= MPSSAS_TARGET_INREMOVAL;
412 cm = mpssas_alloc_tm(sc);
414 mps_printf(sc, "%s: command alloc failure\n", __func__);
418 mpssas_lost_target(sc, targ);
420 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
421 memset(req, 0, sizeof(*req));
422 req->DevHandle = targ->handle;
423 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
424 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
426 /* SAS Hard Link Reset / SATA Link Reset */
427 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
431 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
432 cm->cm_complete = mpssas_remove_device;
433 cm->cm_complete_data = (void *)(uintptr_t)handle;
434 mps_map_command(sc, cm);
438 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
440 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
441 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
442 struct mpssas_target *targ;
443 struct mps_command *next_cm;
446 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
448 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
449 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
453 * Currently there should be no way we can hit this case. It only
454 * happens when we have a failure to allocate chain frames, and
455 * task management commands don't have S/G lists.
457 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
458 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
459 "This should not happen!\n", __func__, tm->cm_flags,
461 mpssas_free_tm(sc, tm);
466 /* XXX retry the remove after the diag reset completes? */
467 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
469 mpssas_free_tm(sc, tm);
473 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
474 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
475 reply->IOCStatus, handle);
476 mpssas_free_tm(sc, tm);
480 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
481 reply->TerminationCount);
482 mps_free_reply(sc, tm->cm_reply_data);
483 tm->cm_reply = NULL; /* Ensures the the reply won't get re-freed */
485 /* Reuse the existing command */
486 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
487 memset(req, 0, sizeof(*req));
488 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
489 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
490 req->DevHandle = handle;
492 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
493 tm->cm_complete = mpssas_remove_complete;
494 tm->cm_complete_data = (void *)(uintptr_t)handle;
496 mps_map_command(sc, tm);
498 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
500 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
503 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
504 ccb = tm->cm_complete_data;
505 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
506 mpssas_scsiio_complete(sc, tm);
511 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
513 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
515 struct mpssas_target *targ;
517 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
519 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
520 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
523 * Currently there should be no way we can hit this case. It only
524 * happens when we have a failure to allocate chain frames, and
525 * task management commands don't have S/G lists.
527 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
528 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
529 "This should not happen!\n", __func__, tm->cm_flags,
531 mpssas_free_tm(sc, tm);
536 /* most likely a chip reset */
537 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
539 mpssas_free_tm(sc, tm);
543 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
544 handle, reply->IOCStatus);
547 * Don't clear target if remove fails because things will get confusing.
548 * Leave the devname and sasaddr intact so that we know to avoid reusing
549 * this target id if possible, and so we can assign the same target id
550 * to this device if it comes back in the future.
552 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
555 targ->encl_handle = 0x0;
556 targ->encl_slot = 0x0;
557 targ->exp_dev_handle = 0x0;
559 targ->linkrate = 0x0;
563 mpssas_free_tm(sc, tm);
567 mpssas_register_events(struct mps_softc *sc)
572 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
573 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
574 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
575 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
576 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
577 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
578 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
579 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
580 setbit(events, MPI2_EVENT_IR_VOLUME);
581 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
582 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
583 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
585 mps_register_events(sc, events, mpssas_evt_handler, NULL,
586 &sc->sassc->mpssas_eh);
592 mps_attach_sas(struct mps_softc *sc)
594 struct mpssas_softc *sassc;
595 #if __FreeBSD_version >= 1000006
600 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
602 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
603 sassc->targets = malloc(sizeof(struct mpssas_target) *
604 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
608 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
609 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
614 unit = device_get_unit(sc->mps_dev);
615 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
616 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
617 if (sassc->sim == NULL) {
618 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
623 TAILQ_INIT(&sassc->ev_queue);
625 /* Initialize taskqueue for Event Handling */
626 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
627 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
628 taskqueue_thread_enqueue, &sassc->ev_tq);
630 /* Run the task queue with lowest priority */
631 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
632 device_get_nameunit(sc->mps_dev));
634 TAILQ_INIT(&sassc->ccb_scanq);
635 error = mps_kproc_create(mpssas_scanner_thread, sassc,
636 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
638 mps_printf(sc, "Error %d starting rescan thread\n", error);
643 sassc->flags |= MPSSAS_SCANTHREAD;
646 * XXX There should be a bus for every port on the adapter, but since
647 * we're just going to fake the topology for now, we'll pretend that
648 * everything is just a target on a single bus.
650 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
651 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
658 * Assume that discovery events will start right away. Freezing
659 * the simq will prevent the CAM boottime scanner from running
660 * before discovery is complete.
662 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
663 xpt_freeze_simq(sassc->sim, 1);
664 sc->sassc->startup_refcount = 0;
666 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
667 sassc->discovery_timeouts = 0;
671 #if __FreeBSD_version >= 1000006
672 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
673 if (status != CAM_REQ_CMP) {
674 mps_printf(sc, "Error %#x registering async handler for "
675 "AC_ADVINFO_CHANGED events\n", status);
681 mpssas_register_events(sc);
689 mps_detach_sas(struct mps_softc *sc)
691 struct mpssas_softc *sassc;
693 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
695 if (sc->sassc == NULL)
699 mps_deregister_events(sc, sassc->mpssas_eh);
702 * Drain and free the event handling taskqueue with the lock
703 * unheld so that any parallel processing tasks drain properly
704 * without deadlocking.
706 if (sassc->ev_tq != NULL)
707 taskqueue_free(sassc->ev_tq);
709 /* Make sure CAM doesn't wedge if we had to bail out early. */
712 /* Deregister our async handler */
713 #if __FreeBSD_version >= 1000006
714 xpt_register_async(0, mpssas_async, sc, NULL);
717 if (sassc->flags & MPSSAS_IN_STARTUP)
718 xpt_release_simq(sassc->sim, 1);
720 if (sassc->sim != NULL) {
721 xpt_bus_deregister(cam_sim_path(sassc->sim));
722 cam_sim_free(sassc->sim, FALSE);
725 if (sassc->flags & MPSSAS_SCANTHREAD) {
726 sassc->flags |= MPSSAS_SHUTDOWN;
727 wakeup(&sassc->ccb_scanq);
729 if (sassc->flags & MPSSAS_SCANTHREAD) {
730 msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
731 "mps_shutdown", 30 * hz);
736 if (sassc->devq != NULL)
737 cam_simq_free(sassc->devq);
739 free(sassc->targets, M_MPT2);
747 mpssas_discovery_end(struct mpssas_softc *sassc)
749 struct mps_softc *sc = sassc->sc;
751 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
753 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
754 callout_stop(&sassc->discovery_callout);
759 mpssas_discovery_timeout(void *data)
761 struct mpssas_softc *sassc = data;
762 struct mps_softc *sc;
765 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
769 "Timeout waiting for discovery, interrupts may not be working!\n");
770 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
772 /* Poll the hardware for events in case interrupts aren't working */
775 mps_printf(sassc->sc,
776 "Finished polling after discovery timeout at %d\n", ticks);
778 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
779 mpssas_discovery_end(sassc);
781 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
782 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
783 callout_reset(&sassc->discovery_callout,
784 MPSSAS_DISCOVERY_TIMEOUT * hz,
785 mpssas_discovery_timeout, sassc);
786 sassc->discovery_timeouts++;
788 mps_dprint(sassc->sc, MPS_FAULT,
789 "Discovery timed out, continuing.\n");
790 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
791 mpssas_discovery_end(sassc);
799 mpssas_action(struct cam_sim *sim, union ccb *ccb)
801 struct mpssas_softc *sassc;
803 sassc = cam_sim_softc(sim);
805 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
806 ccb->ccb_h.func_code);
807 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
809 switch (ccb->ccb_h.func_code) {
812 struct ccb_pathinq *cpi = &ccb->cpi;
814 cpi->version_num = 1;
815 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
816 cpi->target_sprt = 0;
817 cpi->hba_misc = PIM_NOBUSRESET;
818 cpi->hba_eng_cnt = 0;
819 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
821 cpi->initiator_id = 255;
822 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
823 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
824 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
825 cpi->unit_number = cam_sim_unit(sim);
826 cpi->bus_id = cam_sim_bus(sim);
827 cpi->base_transfer_speed = 150000;
828 cpi->transport = XPORT_SAS;
829 cpi->transport_version = 0;
830 cpi->protocol = PROTO_SCSI;
831 cpi->protocol_version = SCSI_REV_SPC;
832 #if __FreeBSD_version >= 800001
834 * XXX KDM where does this number come from?
836 cpi->maxio = 256 * 1024;
838 cpi->ccb_h.status = CAM_REQ_CMP;
841 case XPT_GET_TRAN_SETTINGS:
843 struct ccb_trans_settings *cts;
844 struct ccb_trans_settings_sas *sas;
845 struct ccb_trans_settings_scsi *scsi;
846 struct mpssas_target *targ;
849 sas = &cts->xport_specific.sas;
850 scsi = &cts->proto_specific.scsi;
852 targ = &sassc->targets[cts->ccb_h.target_id];
853 if (targ->handle == 0x0) {
854 cts->ccb_h.status = CAM_TID_INVALID;
858 cts->protocol_version = SCSI_REV_SPC2;
859 cts->transport = XPORT_SAS;
860 cts->transport_version = 0;
862 sas->valid = CTS_SAS_VALID_SPEED;
863 switch (targ->linkrate) {
865 sas->bitrate = 150000;
868 sas->bitrate = 300000;
871 sas->bitrate = 600000;
877 cts->protocol = PROTO_SCSI;
878 scsi->valid = CTS_SCSI_VALID_TQ;
879 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
881 cts->ccb_h.status = CAM_REQ_CMP;
884 case XPT_CALC_GEOMETRY:
885 cam_calc_geometry(&ccb->ccg, /*extended*/1);
886 ccb->ccb_h.status = CAM_REQ_CMP;
889 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
890 mpssas_action_resetdev(sassc, ccb);
895 mps_printf(sassc->sc, "mpssas_action faking success for "
897 ccb->ccb_h.status = CAM_REQ_CMP;
900 mpssas_action_scsiio(sassc, ccb);
902 #if __FreeBSD_version >= 900026
904 mpssas_action_smpio(sassc, ccb);
908 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
916 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
917 target_id_t target_id, lun_id_t lun_id)
919 path_id_t path_id = cam_sim_path(sc->sassc->sim);
920 struct cam_path *path;
922 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
923 ac_code, target_id, lun_id);
925 if (xpt_create_path(&path, NULL,
926 path_id, target_id, lun_id) != CAM_REQ_CMP) {
927 mps_printf(sc, "unable to create path for reset "
932 xpt_async(ac_code, path, NULL);
937 mpssas_complete_all_commands(struct mps_softc *sc)
939 struct mps_command *cm;
943 mps_printf(sc, "%s\n", __func__);
944 mtx_assert(&sc->mps_mtx, MA_OWNED);
946 /* complete all commands with a NULL reply */
947 for (i = 1; i < sc->num_reqs; i++) {
948 cm = &sc->commands[i];
952 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
953 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
955 if (cm->cm_complete != NULL) {
956 mpssas_log_command(cm,
957 "completing cm %p state %x ccb %p for diag reset\n",
958 cm, cm->cm_state, cm->cm_ccb);
960 cm->cm_complete(sc, cm);
964 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
965 mpssas_log_command(cm,
966 "waking up cm %p state %x ccb %p for diag reset\n",
967 cm, cm->cm_state, cm->cm_ccb);
972 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
973 /* this should never happen, but if it does, log */
974 mpssas_log_command(cm,
975 "cm %p state %x flags 0x%x ccb %p during diag "
976 "reset\n", cm, cm->cm_state, cm->cm_flags,
983 mpssas_handle_reinit(struct mps_softc *sc)
987 /* Go back into startup mode and freeze the simq, so that CAM
988 * doesn't send any commands until after we've rediscovered all
989 * targets and found the proper device handles for them.
991 * After the reset, portenable will trigger discovery, and after all
992 * discovery-related activities have finished, the simq will be
995 mps_printf(sc, "%s startup\n", __func__);
996 sc->sassc->flags |= MPSSAS_IN_STARTUP;
997 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
998 xpt_freeze_simq(sc->sassc->sim, 1);
1000 /* notify CAM of a bus reset */
1001 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1004 /* complete and cleanup after all outstanding commands */
1005 mpssas_complete_all_commands(sc);
1007 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1008 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1011 * The simq was explicitly frozen above, so set the refcount to 0.
1012 * The simq will be explicitly released after port enable completes.
1014 sc->sassc->startup_refcount = 0;
1016 /* zero all the target handles, since they may change after the
1017 * reset, and we have to rediscover all the targets and use the new
1020 for (i = 0; i < sc->facts->MaxTargets; i++) {
1021 if (sc->sassc->targets[i].outstanding != 0)
1022 mps_printf(sc, "target %u outstanding %u\n",
1023 i, sc->sassc->targets[i].outstanding);
1024 sc->sassc->targets[i].handle = 0x0;
1025 sc->sassc->targets[i].exp_dev_handle = 0x0;
1026 sc->sassc->targets[i].outstanding = 0;
1027 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1031 mpssas_tm_timeout(void *data)
1033 struct mps_command *tm = data;
1034 struct mps_softc *sc = tm->cm_sc;
1036 mtx_assert(&sc->mps_mtx, MA_OWNED);
1038 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
1043 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1045 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1046 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1047 unsigned int cm_count = 0;
1048 struct mps_command *cm;
1049 struct mpssas_target *targ;
1051 callout_stop(&tm->cm_callout);
1053 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1054 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1058 * Currently there should be no way we can hit this case. It only
1059 * happens when we have a failure to allocate chain frames, and
1060 * task management commands don't have S/G lists.
1062 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1063 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1064 "This should not happen!\n", __func__, tm->cm_flags);
1065 mpssas_free_tm(sc, tm);
1069 if (reply == NULL) {
1070 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1071 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1072 /* this completion was due to a reset, just cleanup */
1073 targ->flags &= ~MPSSAS_TARGET_INRESET;
1075 mpssas_free_tm(sc, tm);
1078 /* we should have gotten a reply. */
1084 mpssas_log_command(tm,
1085 "logical unit reset status 0x%x code 0x%x count %u\n",
1086 reply->IOCStatus, reply->ResponseCode,
1087 reply->TerminationCount);
1089 /* See if there are any outstanding commands for this LUN.
1090 * This could be made more efficient by using a per-LU data
1091 * structure of some sort.
1093 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1094 if (cm->cm_lun == tm->cm_lun)
1098 if (cm_count == 0) {
1099 mpssas_log_command(tm,
1100 "logical unit %u finished recovery after reset\n",
1103 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1106 /* we've finished recovery for this logical unit. check and
1107 * see if some other logical unit has a timedout command
1108 * that needs to be processed.
1110 cm = TAILQ_FIRST(&targ->timedout_commands);
1112 mpssas_send_abort(sc, tm, cm);
1116 mpssas_free_tm(sc, tm);
1120 /* if we still have commands for this LUN, the reset
1121 * effectively failed, regardless of the status reported.
1122 * Escalate to a target reset.
1124 mpssas_log_command(tm,
1125 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1127 mpssas_send_reset(sc, tm,
1128 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a SCSI target-reset task management (TM) command.
 * Stops the TM timeout callout, validates the reply, and either declares
 * recovery finished (no outstanding commands remain on the target) or
 * escalates because the reset did not clear the target's command queue.
 * NOTE(review): this chunk elides lines; 'targ' is dereferenced below but
 * its assignment (presumably targ = tm->cm_targ) is not visible — confirm.
 */
1133 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1135 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1136 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1137 struct mpssas_target *targ;
1139 callout_stop(&tm->cm_callout);
1141 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1142 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1146 * Currently there should be no way we can hit this case. It only
1147 * happens when we have a failure to allocate chain frames, and
1148 * task management commands don't have S/G lists.
1150 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1151 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1152 "This should not happen!\n", __func__, tm->cm_flags);
1153 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the controller was diag-reset underneath us. */
1157 if (reply == NULL) {
1158 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1159 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1160 /* this completion was due to a reset, just cleanup */
1161 targ->flags &= ~MPSSAS_TARGET_INRESET;
1163 mpssas_free_tm(sc, tm);
1166 /* we should have gotten a reply. */
1172 mpssas_log_command(tm,
1173 "target reset status 0x%x code 0x%x count %u\n",
1174 reply->IOCStatus, reply->ResponseCode,
1175 reply->TerminationCount);
/* The reset completed one way or another; the target is no longer in reset. */
1177 targ->flags &= ~MPSSAS_TARGET_INRESET;
1179 if (targ->outstanding == 0) {
1180 /* we've finished recovery for this target and all
1181 * of its logical units.
/* Tell CAM a bus-device reset was delivered so peripherals can re-probe. */
1183 mpssas_log_command(tm,
1184 "recovery finished after target reset\n");
1186 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1190 mpssas_free_tm(sc, tm);
1193 /* after a target reset, if this target still has
1194 * outstanding commands, the reset effectively failed,
1195 * regardless of the status reported. escalate.
1197 mpssas_log_command(tm,
1198 "target reset complete for tm %p, but still have %u command(s)\n",
1199 tm, targ->outstanding);
1204 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a SCSI task-management reset of the given 'type' (logical
 * unit reset or target reset) using the pre-allocated TM command 'tm'.
 * Installs the matching completion handler, arms a MPS_RESET_TIMEOUT-second
 * timeout, and maps/queues the request to the controller.
 */
1207 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1209 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1210 struct mpssas_target *target;
1213 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1214 if (target->handle == 0) {
1215 mps_printf(sc, "%s null devhandle for target_id %d\n",
1216 __func__, target->tid);
1220 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1221 req->DevHandle = target->handle;
1222 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1223 req->TaskType = type;
1225 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1226 /* XXX Need to handle invalid LUNs */
1227 MPS_SET_LUN(req->LUN, tm->cm_lun);
1228 tm->cm_targ->logical_unit_resets++;
1229 mpssas_log_command(tm, "sending logical unit reset\n");
1230 tm->cm_complete = mpssas_logical_unit_reset_complete;
1232 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1233 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1234 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1235 tm->cm_targ->target_resets++;
1236 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1237 mpssas_log_command(tm, "sending target reset\n");
1238 tm->cm_complete = mpssas_target_reset_complete;
/* Any other task type is a programming error; log and bail. */
1241 mps_printf(sc, "unexpected reset type 0x%x\n", type);
/* TM requests go out on the high-priority queue. */
1246 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1247 tm->cm_complete_data = (void *)tm;
1249 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1250 mpssas_tm_timeout, tm);
1252 err = mps_map_command(sc, tm);
1254 mpssas_log_command(tm,
1255 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management command.  Decides
 * whether recovery for the target is done (no more timed-out commands),
 * should continue with the next timed-out command, or must escalate to a
 * logical unit reset because the aborted command never completed.
 */
1263 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1265 struct mps_command *cm;
1266 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1267 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1268 struct mpssas_target *targ;
1270 callout_stop(&tm->cm_callout);
1272 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1273 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1277 * Currently there should be no way we can hit this case. It only
1278 * happens when we have a failure to allocate chain frames, and
1279 * task management commands don't have S/G lists.
1281 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1282 mpssas_log_command(tm,
1283 "cm_flags = %#x for abort %p TaskMID %u!\n",
1284 tm->cm_flags, tm, req->TaskMID);
1285 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the controller was diag-reset underneath us. */
1289 if (reply == NULL) {
1290 mpssas_log_command(tm,
1291 "NULL abort reply for tm %p TaskMID %u\n",
1293 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1294 /* this completion was due to a reset, just cleanup */
1296 mpssas_free_tm(sc, tm);
1299 /* we should have gotten a reply. */
1305 mpssas_log_command(tm,
1306 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1308 reply->IOCStatus, reply->ResponseCode,
1309 reply->TerminationCount);
/* Look at the head of the timed-out queue to decide what to do next. */
1311 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1313 /* if there are no more timedout commands, we're done with
1314 * error recovery for this target.
1316 mpssas_log_command(tm,
1317 "finished recovery after aborting TaskMID %u\n",
1321 mpssas_free_tm(sc, tm);
/* Head of queue changed => the aborted command completed; move on. */
1323 else if (req->TaskMID != cm->cm_desc.Default.SMID) {
1324 /* abort success, but we have more timedout commands to abort */
1325 mpssas_log_command(tm,
1326 "continuing recovery after aborting TaskMID %u\n",
1329 mpssas_send_abort(sc, tm, cm);
1332 /* we didn't get a command completion, so the abort
1333 * failed as far as we're concerned. escalate.
1335 mpssas_log_command(tm,
1336 "abort failed for TaskMID %u tm %p\n",
1339 mpssas_send_reset(sc, tm,
1340 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1344 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT_TASK task-management request for the timed-out
 * command 'cm', using the pre-allocated TM command 'tm'.  Arms a
 * MPS_ABORT_TIMEOUT-second timeout and queues the request.
 * NOTE(review): 'targ' is dereferenced below but its assignment (presumably
 * targ = cm->cm_targ on an elided line) is not visible in this chunk.
 */
1347 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1349 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1350 struct mpssas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort against. */
1354 if (targ->handle == 0) {
1355 mps_printf(sc, "%s null devhandle for target_id %d\n",
1356 __func__, cm->cm_ccb->ccb_h.target_id);
1360 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1361 req->DevHandle = targ->handle;
1362 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1363 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1365 /* XXX Need to handle invalid LUNs */
1366 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun)
/* Identify the victim I/O by its SMID; the completion handler matches on it. */
1368 req->TaskMID = cm->cm_desc.Default.SMID;
1371 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1372 tm->cm_complete = mpssas_abort_complete;
1373 tm->cm_complete_data = (void *)tm;
1374 tm->cm_targ = cm->cm_targ;
1375 tm->cm_lun = cm->cm_lun;
1377 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1378 mpssas_tm_timeout, tm);
1382 err = mps_map_command(sc, tm);
1384 mpssas_log_command(tm,
1385 "error %d sending abort for cm %p SMID %u\n",
1386 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * Marks the command timed out, queues it on the target's recovery list,
 * and kicks off (or joins) task-management-based error recovery by
 * aborting the first timed-out command on the target.
 */
1392 mpssas_scsiio_timeout(void *data)
1394 struct mps_softc *sc;
1395 struct mps_command *cm;
1396 struct mpssas_target *targ;
1398 cm = (struct mps_command *)data;
1401 mtx_assert(&sc->mps_mtx, MA_OWNED);
1403 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1406 * Run the interrupt handler to make sure it's not pending. This
1407 * isn't perfect because the command could have already completed
1408 * and been re-used, though this is unlikely.
1410 mps_intr_locked(sc);
/* If the interrupt pass completed it, there's nothing to recover. */
1411 if (cm->cm_state == MPS_CM_STATE_FREE) {
1412 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1416 if (cm->cm_ccb == NULL) {
1417 mps_printf(sc, "command timeout with NULL ccb\n");
1421 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1427 /* XXX first, check the firmware state, to see if it's still
1428 * operational. if not, do a diag reset.
1431 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1432 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1433 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1435 if (targ->tm != NULL) {
1436 /* target already in recovery, just queue up another
1437 * timedout command to be processed later.
1439 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1442 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1443 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1446 /* start recovery by aborting the first timedout command */
1447 mpssas_send_abort(sc, targ->tm, cm);
1450 /* XXX queue this target up for recovery once a TM becomes
1451 * available. The firmware only has a limited number of
1452 * HighPriority credits for the high priority requests used
1453 * for task management, and we ran out.
1455 * Isilon: don't worry about this for now, since we have
1456 * more credits than disks in an enclosure, and limit
1457 * ourselves to one TM per target for recovery.
1459 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into an
 * MPI2 SCSI IO request, set up EEDP (protection information) if the LUN is
 * formatted for it, optionally attempt WD (WarpDrive) direct-drive I/O,
 * arm the per-command timeout, and hand the command to the controller.
 */
1466 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1468 MPI2_SCSI_IO_REQUEST *req;
1469 struct ccb_scsiio *csio;
1470 struct mps_softc *sc;
1471 struct mpssas_target *targ;
1472 struct mpssas_lun *lun;
1473 struct mps_command *cm;
1474 uint8_t i, lba_byte, *ref_tag_addr;
1475 uint16_t eedp_flags;
1478 mps_dprint(sc, MPS_TRACE, "%s ccb %p\n", __func__, ccb);
1479 mtx_assert(&sc->mps_mtx, MA_OWNED);
1482 targ = &sassc->targets[csio->ccb_h.target_id];
/* Handle 0 means no device behind this target id. */
1483 if (targ->handle == 0x0) {
1484 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1485 __func__, csio->ccb_h.target_id);
1486 csio->ccb_h.status = CAM_TID_INVALID;
1491 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1492 * that the volume has timed out. We want volumes to be enumerated
1493 * until they are deleted/removed, not just failed.
1495 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1496 if (targ->devinfo == 0)
1497 csio->ccb_h.status = CAM_REQ_CMP;
1499 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1504 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1505 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1506 csio->ccb_h.status = CAM_TID_INVALID;
1511 cm = mps_alloc_command(sc);
/* Out of command frames: freeze the SIM queue and ask CAM to requeue. */
1513 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1514 xpt_freeze_simq(sassc->sim, 1);
1515 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1517 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1518 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1523 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1524 bzero(req, sizeof(*req));
1525 req->DevHandle = targ->handle;
1526 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1528 req->SenseBufferLowAddress = cm->cm_sense_busaddr;
1529 req->SenseBufferLength = MPS_SENSE_LEN;
1531 req->ChainOffset = 0;
1532 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1537 req->DataLength = csio->dxfer_len;
1538 req->BidirectionalDataLength = 0;
1539 req->IoFlags = csio->cdb_len;
1542 /* Note: BiDirectional transfers are not supported */
1543 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1545 req->Control = MPI2_SCSIIO_CONTROL_READ;
1546 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1549 req->Control = MPI2_SCSIIO_CONTROL_WRITE;
1550 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1554 req->Control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1559 * It looks like the hardware doesn't require an explicit tag
1560 * number for each transaction. SAM Task Management not supported
1563 switch (csio->tag_action) {
1564 case MSG_HEAD_OF_Q_TAG:
1565 req->Control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1567 case MSG_ORDERED_Q_TAG:
1568 req->Control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1571 req->Control |= MPI2_SCSIIO_CONTROL_ACAQ;
1573 case CAM_TAG_ACTION_NONE:
1574 case MSG_SIMPLE_Q_TAG:
1576 req->Control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Carry over the per-target Transport Layer Retries setting. */
1579 req->Control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1581 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1582 mps_free_command(sc, cm);
1583 ccb->ccb_h.status = CAM_LUN_INVALID;
1588 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1589 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1591 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1592 req->IoFlags = csio->cdb_len;
1595 * Check if EEDP is supported and enabled. If it is then check if the
1596 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1597 * is formatted for EEDP support. If all of this is true, set CDB up
1598 * for EEDP transfer.
1600 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1601 if (sc->eedp_enabled && eedp_flags) {
1602 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1603 if (lun->lun_id == csio->ccb_h.target_lun) {
1608 if ((lun != NULL) && (lun->eedp_formatted)) {
1609 req->EEDPBlockSize = lun->eedp_block_size;
1610 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1611 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1612 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1613 req->EEDPFlags = eedp_flags;
1616 * If CDB less than 32, fill in Primary Ref Tag with
1617 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1618 * already there. Also, set protection bit. FreeBSD
1619 * currently does not support CDBs bigger than 16, but
1620 * the code doesn't hurt, and will be here for the
1623 if (csio->cdb_len != 32) {
1624 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1625 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1626 PrimaryReferenceTag;
1627 for (i = 0; i < 4; i++) {
1629 req->CDB.CDB32[lba_byte + i];
1632 req->CDB.EEDP32.PrimaryApplicationTagMask =
1634 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1638 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1639 req->EEDPFlags = eedp_flags;
1640 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1646 cm->cm_data = csio->data_ptr;
1647 cm->cm_length = csio->dxfer_len;
1648 cm->cm_sge = &req->SGL;
/* SGL space = request frame minus the 24-word header, in bytes. */
1649 cm->cm_sglsize = (32 - 24) * 4;
1650 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1651 cm->cm_desc.SCSIIO.DevHandle = targ->handle;
1652 cm->cm_complete = mpssas_scsiio_complete;
1653 cm->cm_complete_data = ccb;
1655 cm->cm_lun = csio->ccb_h.target_lun;
1659 * If HBA is a WD and the command is not for a retry, try to build a
1660 * direct I/O message. If failed, or the command is for a retry, send
1661 * the I/O to the IR volume itself.
1663 if (sc->WD_valid_config) {
1664 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1665 mpssas_direct_drive_io(sassc, cm, ccb);
1667 ccb->ccb_h.status = CAM_REQ_INPROG;
/* CCB timeout is in milliseconds; callout wants ticks. */
1671 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1672 mpssas_scsiio_timeout, cm);
1675 targ->outstanding++;
1676 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1678 if ((sc->mps_debug & MPS_TRACE) != 0)
1679 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1680 __func__, cm, ccb, targ->outstanding);
1682 mps_map_command(sc, cm);
/*
 * Completion handler for SCSI I/O commands.  Syncs and unloads DMA maps,
 * removes the command from the target's queues, translates the MPI2 reply
 * (IOCStatus / SCSIStatus / SCSIState) into a CAM status, copies autosense
 * data, handles WD direct-drive retries, TLR bookkeeping on tape INQUIRY
 * responses, and SIM-queue unfreezing before returning the CCB to CAM.
 */
1687 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1689 MPI2_SCSI_IO_REPLY *rep;
1691 struct ccb_scsiio *csio;
1692 struct mpssas_softc *sassc;
1693 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1694 u8 *TLR_bits, TLR_on;
1698 mps_dprint(sc, MPS_TRACE,
1699 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
1700 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1701 cm->cm_targ->outstanding);
1703 callout_stop(&cm->cm_callout);
1704 mtx_assert(&sc->mps_mtx, MA_OWNED);
1707 ccb = cm->cm_complete_data;
1709 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1711 * XXX KDM if the chain allocation fails, does it matter if we do
1712 * the sync and unload here? It is simpler to do it in every case,
1713 * assuming it doesn't cause problems.
1715 if (cm->cm_data != NULL) {
1716 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1717 dir = BUS_DMASYNC_POSTREAD;
1718 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
/* NOTE(review): stray double semicolon below; also 'dir' appears
 * uninitialized when neither DATAIN nor DATAOUT is set — the
 * bus_dmamap_sync() call would then use an indeterminate value. */
1719 dir = BUS_DMASYNC_POSTWRITE;;
1720 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1721 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1724 cm->cm_targ->completed++;
1725 cm->cm_targ->outstanding--;
1726 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
/* A timed-out command completing here is part of recovery bookkeeping. */
1728 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
1729 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
1730 if (cm->cm_reply != NULL)
1731 mpssas_log_command(cm,
1732 "completed timedout cm %p ccb %p during recovery "
1733 "ioc %x scsi %x state %x xfer %u\n",
1735 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1736 rep->TransferCount);
1738 mpssas_log_command(cm,
1739 "completed timedout cm %p ccb %p during recovery\n",
1741 } else if (cm->cm_targ->tm != NULL) {
1742 if (cm->cm_reply != NULL)
1743 mpssas_log_command(cm,
1744 "completed cm %p ccb %p during recovery "
1745 "ioc %x scsi %x state %x xfer %u\n",
1747 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
1748 rep->TransferCount);
1750 mpssas_log_command(cm,
1751 "completed cm %p ccb %p during recovery\n",
1753 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1754 mpssas_log_command(cm,
1755 "reset completed cm %p ccb %p\n",
1759 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1761 * We ran into an error after we tried to map the command,
1762 * so we're getting a callback without queueing the command
1763 * to the hardware. So we set the status here, and it will
1764 * be retained below. We'll go through the "fast path",
1765 * because there can be no reply when we haven't actually
1766 * gone out to the hardware.
1768 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1771 * Currently the only error included in the mask is
1772 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
1773 * chain frames. We need to freeze the queue until we get
1774 * a command that completed without this error, which will
1775 * hopefully have some chain frames attached that we can
1776 * use. If we wanted to get smarter about it, we would
1777 * only unfreeze the queue in this condition when we're
1778 * sure that we're getting some chain frames back. That's
1779 * probably unnecessary.
1781 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1782 xpt_freeze_simq(sassc->sim, 1);
1783 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1784 mps_dprint(sc, MPS_INFO, "Error sending command, "
1785 "freezing SIM queue\n");
1789 /* Take the fast path to completion */
1790 if (cm->cm_reply == NULL) {
1791 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1792 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
1793 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
1795 ccb->ccb_h.status = CAM_REQ_CMP;
1796 ccb->csio.scsi_status = SCSI_STATUS_OK;
1798 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
1799 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1800 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
1801 mps_dprint(sc, MPS_INFO,
1802 "Unfreezing SIM queue\n");
1807 * There are two scenarios where the status won't be
1808 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
1809 * set, the second is in the MPS_FLAGS_DIAGRESET above.
1811 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1813 * Freeze the dev queue so that commands are
1814 * executed in the correct order with after error
1817 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1818 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
1820 mps_free_command(sc, cm);
1825 if (sc->mps_debug & MPS_TRACE)
1826 mpssas_log_command(cm,
1827 "ioc %x scsi %x state %x xfer %u\n",
1828 rep->IOCStatus, rep->SCSIStatus,
1829 rep->SCSIState, rep->TransferCount);
1832 * If this is a Direct Drive I/O, reissue the I/O to the original IR
1833 * Volume if an error occurred (normal I/O retry). Use the original
1834 * CCB, but set a flag that this will be a retry so that it's sent to
1835 * the original volume. Free the command but reuse the CCB.
1837 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
1838 mps_free_command(sc, cm);
1839 ccb->ccb_h.status = MPS_WD_RETRY;
1840 mpssas_action_scsiio(sassc, ccb);
/* Map the firmware's IOCStatus onto the appropriate CAM status. */
1844 switch (rep->IOCStatus & MPI2_IOCSTATUS_MASK) {
1845 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1846 csio->resid = cm->cm_length - rep->TransferCount;
1848 case MPI2_IOCSTATUS_SUCCESS:
1849 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1851 if ((rep->IOCStatus & MPI2_IOCSTATUS_MASK) ==
1852 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
1853 mpssas_log_command(cm, "recovered error\n");
1855 /* Completion failed at the transport level. */
1856 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
1857 MPI2_SCSI_STATE_TERMINATED)) {
1858 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1862 /* In a modern packetized environment, an autosense failure
1863 * implies that there's not much else that can be done to
1864 * recover the command.
1866 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
1867 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1872 * CAM doesn't care about SAS Response Info data, but if this is
1873 * the state check if TLR should be done. If not, clear the
1874 * TLR_bits for the target.
1876 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
1877 ((rep->ResponseInfo & MPI2_SCSI_RI_MASK_REASONCODE) ==
1878 MPS_SCSI_RI_INVALID_FRAME)) {
1879 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
1880 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1884 * Intentionally override the normal SCSI status reporting
1885 * for these two cases. These are likely to happen in a
1886 * multi-initiator environment, and we want to make sure that
1887 * CAM retries these commands rather than fail them.
1889 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
1890 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
1891 ccb->ccb_h.status = CAM_REQ_ABORTED;
1895 /* Handle normal status and sense */
1896 csio->scsi_status = rep->SCSIStatus;
1897 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
1898 ccb->ccb_h.status = CAM_REQ_CMP;
1900 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1902 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1903 int sense_len, returned_sense_len;
1905 returned_sense_len = min(rep->SenseCount,
1906 sizeof(struct scsi_sense_data));
1907 if (returned_sense_len < ccb->csio.sense_len)
1908 ccb->csio.sense_resid = ccb->csio.sense_len -
1911 ccb->csio.sense_resid = 0;
1913 sense_len = min(returned_sense_len,
1914 ccb->csio.sense_len - ccb->csio.sense_resid);
/* NOTE(review): sizeof(&...) below yields pointer size, so the bzero
 * only clears a few bytes of sense_data — likely meant sizeof(...). */
1915 bzero(&ccb->csio.sense_data,
1916 sizeof(&ccb->csio.sense_data));
1917 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
1918 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1922 * Check if this is an INQUIRY command. If it's a VPD inquiry,
1923 * and it's page code 0 (Supported Page List), and there is
1924 * inquiry data, and this is for a sequential access device, and
1925 * the device is an SSP target, and TLR is supported by the
1926 * controller, turn the TLR_bits value ON if page 0x90 is
1929 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
1930 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
1931 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
1932 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
1933 T_SEQUENTIAL) && (sc->control_TLR) &&
1934 (sc->mapping_table[csio->ccb_h.target_id].device_info &
1935 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
1936 vpd_list = (struct scsi_vpd_supported_page_list *)
1938 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
1940 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
1941 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
1942 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
1943 csio->cdb_io.cdb_bytes[4];
1944 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
1945 if (vpd_list->list[i] == 0x90) {
1952 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1953 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1955 * If devinfo is 0 this will be a volume. In that case don't
1956 * tell CAM that the volume is not there. We want volumes to
1957 * be enumerated until they are deleted/removed, not just
1960 if (cm->cm_targ->devinfo == 0)
1961 ccb->ccb_h.status = CAM_REQ_CMP;
1963 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
1965 case MPI2_IOCSTATUS_INVALID_SGL:
1966 mps_print_scsiio_cmd(sc, cm);
1967 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
1969 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1971 * This is one of the responses that comes back when an I/O
1972 * has been aborted. If it is because of a timeout that we
1973 * initiated, just set the status to CAM_CMD_TIMEOUT.
1974 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
1975 * command is the same (it gets retried, subject to the
1976 * retry counter), the only difference is what gets printed
1979 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
1980 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1982 ccb->ccb_h.status = CAM_REQ_ABORTED;
1984 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1985 /* resid is ignored for this condition */
1987 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1989 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1990 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1992 * Since these are generally external (i.e. hopefully
1993 * transient transport-related) errors, retry these without
1994 * decrementing the retry count.
1996 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1997 mpssas_log_command(cm,
1998 "terminated ioc %x scsi %x state %x xfer %u\n",
1999 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2000 rep->TransferCount);
2002 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2003 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2004 case MPI2_IOCSTATUS_INVALID_VPID:
2005 case MPI2_IOCSTATUS_INVALID_FIELD:
2006 case MPI2_IOCSTATUS_INVALID_STATE:
2007 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2008 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2009 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2010 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2011 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2013 mpssas_log_command(cm,
2014 "completed ioc %x scsi %x state %x xfer %u\n",
2015 rep->IOCStatus, rep->SCSIStatus, rep->SCSIState,
2016 rep->TransferCount);
2017 csio->resid = cm->cm_length;
2018 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2022 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2023 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2024 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2025 mps_dprint(sc, MPS_INFO, "Command completed, "
2026 "unfreezing SIM queue\n");
2029 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2030 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2031 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2034 mps_free_command(sc, cm);
/*
 * WarpDrive direct-drive I/O: for READ/WRITE 6/10/16 CDBs aimed at the IR
 * volume, translate the virtual LBA to a physical LBA, rewrite the CDB in
 * place, and retarget the request at the member PhysDisk's DevHandle —
 * bypassing the IR volume — when the I/O does not cross a stripe boundary.
 * On any non-qualifying case the request is left untouched and goes to the
 * volume itself.
 */
2039 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2041 pMpi2SCSIIORequest_t pIO_req;
2042 struct mps_softc *sc = sassc->sc;
2044 uint32_t physLBA, stripe_offset, stripe_unit;
2045 uint32_t io_size, column;
2046 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2049 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2050 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2051 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2052 * bit different than the 10/16 CDBs, handle them separately.
2054 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2055 CDB = pIO_req->CDB.CDB32;
2058 * Handle 6 byte CDBs.
2060 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2061 (CDB[0] == WRITE_6))) {
2063 * Get the transfer size in blocks.
2065 io_size = (cm->cm_length >> sc->DD_block_exponent);
2068 * Get virtual LBA given in the CDB.
2070 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2071 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2074 * Check that LBA range for I/O does not exceed volume's
2077 if ((virtLBA + (uint64_t)io_size - 1) <=
2080 * Check if the I/O crosses a stripe boundary. If not,
2081 * translate the virtual LBA to a physical LBA and set
2082 * the DevHandle for the PhysDisk to be used. If it
2083 * does cross a boundry, do normal I/O. To get the
2084 * right DevHandle to use, get the map number for the
2085 * column, then use that map number to look up the
2086 * DevHandle of the PhysDisk.
2088 stripe_offset = (uint32_t)virtLBA &
2089 (sc->DD_stripe_size - 1);
2090 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2091 physLBA = (uint32_t)virtLBA >>
2092 sc->DD_stripe_exponent;
2093 stripe_unit = physLBA / sc->DD_num_phys_disks;
2094 column = physLBA % sc->DD_num_phys_disks;
2095 pIO_req->DevHandle =
2096 sc->DD_column_map[column].dev_handle;
2097 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 21-bit LBA field of the 6-byte CDB, big-endian. */
2100 physLBA = (stripe_unit <<
2101 sc->DD_stripe_exponent) + stripe_offset;
2102 ptrLBA = &pIO_req->CDB.CDB32[1];
2103 physLBA_byte = (uint8_t)(physLBA >> 16);
2104 *ptrLBA = physLBA_byte;
2105 ptrLBA = &pIO_req->CDB.CDB32[2];
2106 physLBA_byte = (uint8_t)(physLBA >> 8);
2107 *ptrLBA = physLBA_byte;
2108 ptrLBA = &pIO_req->CDB.CDB32[3];
2109 physLBA_byte = (uint8_t)physLBA;
2110 *ptrLBA = physLBA_byte;
2113 * Set flag that Direct Drive I/O is
2116 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2123 * Handle 10 or 16 byte CDBs.
2125 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2126 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2127 (CDB[0] == WRITE_16))) {
2129 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2130 * are 0. If not, this is accessing beyond 2TB so handle it in
2131 * the else section. 10-byte CDB's are OK.
2133 if ((CDB[0] < READ_16) ||
2134 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2136 * Get the transfer size in blocks.
2138 io_size = (cm->cm_length >> sc->DD_block_exponent);
2141 * Get virtual LBA. Point to correct lower 4 bytes of
2142 * LBA in the CDB depending on command.
2144 lba_idx = (CDB[0] < READ_16) ? 2 : 6;
2145 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2146 ((uint64_t)CDB[lba_idx + 1] << 16) |
2147 ((uint64_t)CDB[lba_idx + 2] << 8) |
2148 (uint64_t)CDB[lba_idx + 3];
2151 * Check that LBA range for I/O does not exceed volume's
2154 if ((virtLBA + (uint64_t)io_size - 1) <=
2157 * Check if the I/O crosses a stripe boundary.
2158 * If not, translate the virtual LBA to a
2159 * physical LBA and set the DevHandle for the
2160 * PhysDisk to be used. If it does cross a
2161 * boundry, do normal I/O. To get the right
2162 * DevHandle to use, get the map number for the
2163 * column, then use that map number to look up
2164 * the DevHandle of the PhysDisk.
2166 stripe_offset = (uint32_t)virtLBA &
2167 (sc->DD_stripe_size - 1);
2168 if ((stripe_offset + io_size) <=
2169 sc->DD_stripe_size) {
2170 physLBA = (uint32_t)virtLBA >>
2171 sc->DD_stripe_exponent;
2172 stripe_unit = physLBA /
2173 sc->DD_num_phys_disks;
2175 sc->DD_num_phys_disks;
2176 pIO_req->DevHandle =
2177 sc->DD_column_map[column].
2179 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 4-byte LBA field in place, big-endian. */
2182 physLBA = (stripe_unit <<
2183 sc->DD_stripe_exponent) +
2186 &pIO_req->CDB.CDB32[lba_idx];
2187 physLBA_byte = (uint8_t)(physLBA >> 24);
2188 *ptrLBA = physLBA_byte;
2190 &pIO_req->CDB.CDB32[lba_idx + 1];
2191 physLBA_byte = (uint8_t)(physLBA >> 16);
2192 *ptrLBA = physLBA_byte;
2194 &pIO_req->CDB.CDB32[lba_idx + 2];
2195 physLBA_byte = (uint8_t)(physLBA >> 8);
2196 *ptrLBA = physLBA_byte;
2198 &pIO_req->CDB.CDB32[lba_idx + 3];
2199 physLBA_byte = (uint8_t)physLBA;
2200 *ptrLBA = physLBA_byte;
2203 * Set flag that Direct Drive I/O is
2206 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2211 * 16-byte CDB and the upper 4 bytes of the CDB are not
2212 * 0. Get the transfer size in blocks.
2214 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): the shift below is almost certainly wrong — the most
 * significant LBA byte (CDB[2]) of a 16-byte CDB should be shifted by
 * 56, not 54, to reassemble a big-endian 64-bit LBA.  Confirm against
 * the upstream driver before relying on >2TB direct-drive I/O. */
2219 virtLBA = ((uint64_t)CDB[2] << 54) |
2220 ((uint64_t)CDB[3] << 48) |
2221 ((uint64_t)CDB[4] << 40) |
2222 ((uint64_t)CDB[5] << 32) |
2223 ((uint64_t)CDB[6] << 24) |
2224 ((uint64_t)CDB[7] << 16) |
2225 ((uint64_t)CDB[8] << 8) |
2229 * Check that LBA range for I/O does not exceed volume's
2232 if ((virtLBA + (uint64_t)io_size - 1) <=
2235 * Check if the I/O crosses a stripe boundary.
2236 * If not, translate the virtual LBA to a
2237 * physical LBA and set the DevHandle for the
2238 * PhysDisk to be used. If it does cross a
2239 * boundry, do normal I/O. To get the right
2240 * DevHandle to use, get the map number for the
2241 * column, then use that map number to look up
2242 * the DevHandle of the PhysDisk.
2244 stripe_offset = (uint32_t)virtLBA &
2245 (sc->DD_stripe_size - 1);
2246 if ((stripe_offset + io_size) <=
2247 sc->DD_stripe_size) {
2248 physLBA = (uint32_t)(virtLBA >>
2249 sc->DD_stripe_exponent);
2250 stripe_unit = physLBA /
2251 sc->DD_num_phys_disks;
2253 sc->DD_num_phys_disks;
2254 pIO_req->DevHandle =
2255 sc->DD_column_map[column].
2257 cm->cm_desc.SCSIIO.DevHandle =
2260 physLBA = (stripe_unit <<
2261 sc->DD_stripe_exponent) +
2265 * Set upper 4 bytes of LBA to 0. We
2266 * assume that the phys disks are less
2267 * than 2 TB's in size. Then, set the
2270 pIO_req->CDB.CDB32[2] = 0;
2271 pIO_req->CDB.CDB32[3] = 0;
2272 pIO_req->CDB.CDB32[4] = 0;
2273 pIO_req->CDB.CDB32[5] = 0;
2274 ptrLBA = &pIO_req->CDB.CDB32[6];
2275 physLBA_byte = (uint8_t)(physLBA >> 24);
2276 *ptrLBA = physLBA_byte;
2277 ptrLBA = &pIO_req->CDB.CDB32[7];
2278 physLBA_byte = (uint8_t)(physLBA >> 16);
2279 *ptrLBA = physLBA_byte;
2280 ptrLBA = &pIO_req->CDB.CDB32[8];
2281 physLBA_byte = (uint8_t)(physLBA >> 8);
2282 *ptrLBA = physLBA_byte;
2283 ptrLBA = &pIO_req->CDB.CDB32[9];
2284 physLBA_byte = (uint8_t)physLBA;
2285 *ptrLBA = physLBA_byte;
2288 * Set flag that Direct Drive I/O is
2291 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2298 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests.  Validates the reply,
 * maps IOCStatus/SASStatus and the SMP frame's function result onto a CAM
 * status, then syncs/unloads the DMA map and frees the command.
 */
2300 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2302 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2303 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2307 ccb = cm->cm_complete_data;
2310 * Currently there should be no way we can hit this case. It only
2311 * happens when we have a failure to allocate chain frames, and SMP
2312 * commands require two S/G elements only. That should be handled
2313 * in the standard request size.
2315 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2316 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2317 __func__, cm->cm_flags);
2318 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2322 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2324 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2325 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2329 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
/* Reassemble the 64-bit SAS address from its two little-endian halves. */
2330 sasaddr = le32toh(req->SASAddress.Low);
2331 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2333 if ((rpl->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2334 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2335 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2336 __func__, rpl->IOCStatus, rpl->SASStatus);
2337 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2341 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2342 "%#jx completed successfully\n", __func__,
2343 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2345 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2346 ccb->ccb_h.status = CAM_REQ_CMP;
2348 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2352 * We sync in both directions because we had DMAs in the S/G list
2353 * in both directions.
2355 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2356 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2357 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2358 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the firmware for the
 * expander at 'sasaddr', using the request/response buffers supplied in
 * the XPT_SMP_IO CCB.  Request and response are mapped together via a
 * two-element uio so a single mps_map_command() covers both directions.
 * Completion is handled asynchronously by mpssas_smpio_complete().
 * (NOTE(review): some source lines are missing from this view.)
 */
2363 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2365 struct mps_command *cm;
2366 uint8_t *request, *response;
2367 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2368 struct mps_softc *sc;
2377 * XXX We don't yet support physical addresses here.
2379 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2380 mps_printf(sc, "%s: physical addresses not supported\n",
2382 ccb->ccb_h.status = CAM_REQ_INVALID;
2388 * If the user wants to send an S/G list, check to make sure they
2389 * have single buffers.
2391 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2393 * The chip does not support more than one buffer for the
2394 * request or response.
2396 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2397 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2398 mps_printf(sc, "%s: multiple request or response "
2399 "buffer segments not supported for SMP\n",
2401 ccb->ccb_h.status = CAM_REQ_INVALID;
2407 * The CAM_SCATTER_VALID flag was originally implemented
2408 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2409 * We have two. So, just take that flag to mean that we
2410 * might have S/G lists, and look at the S/G segment count
2411 * to figure out whether that is the case for each individual
2414 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2415 bus_dma_segment_t *req_sg;
2417 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2418 request = (uint8_t *)req_sg[0].ds_addr;
2420 request = ccb->smpio.smp_request;
2422 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2423 bus_dma_segment_t *rsp_sg;
2425 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2426 response = (uint8_t *)rsp_sg[0].ds_addr;
2428 response = ccb->smpio.smp_response;
/* No S/G list at all: the CCB carries plain virtual buffer pointers. */
2430 request = ccb->smpio.smp_request;
2431 response = ccb->smpio.smp_response;
2434 cm = mps_alloc_command(sc);
2436 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2437 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Fill in the MPI2 SMP passthrough request frame. */
2442 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2443 bzero(req, sizeof(*req));
2444 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2446 /* Allow the chip to use any route to this SAS address. */
2447 req->PhysicalPort = 0xff;
2449 req->RequestDataLength = ccb->smpio.smp_request_len;
2451 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2453 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2454 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2456 mpi_init_sge(cm, req, &req->SGL);
2459 * Set up a uio to pass into mps_map_command(). This allows us to
2460 * do one map command, and one busdma call in there.
2462 cm->cm_uio.uio_iov = cm->cm_iovec;
2463 cm->cm_uio.uio_iovcnt = 2;
2464 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2467 * The read/write flag isn't used by busdma, but set it just in
2468 * case. This isn't exactly accurate, either, since we're going in
2471 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = outgoing SMP request, iovec[1] = incoming SMP response. */
2473 cm->cm_iovec[0].iov_base = request;
2474 cm->cm_iovec[0].iov_len = req->RequestDataLength;
2475 cm->cm_iovec[1].iov_base = response;
2476 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2478 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2479 cm->cm_iovec[1].iov_len;
2482 * Trigger a warning message in mps_data_cb() for the user if we
2483 * wind up exceeding two S/G segments. The chip expects one
2484 * segment for the request and another for the response.
2486 cm->cm_max_segs = 2;
2488 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2489 cm->cm_complete = mpssas_smpio_complete;
2490 cm->cm_complete_data = ccb;
2493 * Tell the mapping code that we're using a uio, and that this is
2494 * an SMP passthrough request. There is a little special-case
2495 * logic there (in mps_data_cb()) to handle the bidirectional
2498 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2499 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2501 /* The chip data format is little endian. */
2502 req->SASAddress.High = htole32(sasaddr >> 32);
2503 req->SASAddress.Low = htole32(sasaddr);
2506 * XXX Note that we don't have a timeout/abort mechanism here.
2507 * From the manual, it looks like task management requests only
2508 * work for SCSI IO and SATA passthrough requests. We may need to
2509 * have a mechanism to retry requests in the event of a chip reset
2510 * at least. Hopefully the chip will insure that any errors short
2511 * of that are relayed back to the driver.
2513 error = mps_map_command(sc, cm);
/* EINPROGRESS just means the busdma callback will fire later; not an error. */
2514 if ((error != 0) && (error != EINPROGRESS)) {
2515 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
2523 mps_free_command(sc, cm);
2524 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * XPT_SMP_IO dispatcher: resolve the SAS address of the SMP target for the
 * CCB's target_id and hand the CCB to mpssas_send_smpcmd().  Resolution
 * order (per the visible code): the target's own embedded SMP target, a
 * commented-out VPD-0x83 path lookup, then the target's parent (expander)
 * via either the OLD_MPS_PROBE or the parent_devinfo path.
 * (NOTE(review): some source lines are missing from this view.)
 */
2531 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2533 struct mps_softc *sc;
2534 struct mpssas_target *targ;
2535 uint64_t sasaddr = 0;
2540 * Make sure the target exists.
2542 targ = &sassc->targets[ccb->ccb_h.target_id];
2543 if (targ->handle == 0x0) {
2544 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2545 ccb->ccb_h.target_id);
2546 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2552 * If this device has an embedded SMP target, we'll talk to it
2554 * figure out what the expander's address is.
2556 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2557 sasaddr = targ->sasaddr;
2560 * If we don't have a SAS address for the expander yet, try
2561 * grabbing it from the page 0x83 information cached in the
2562 * transport layer for this target. LSI expanders report the
2563 * expander SAS address as the port-associated SAS address in
2564 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2567 * XXX KDM disable this for now, but leave it commented out so that
2568 * it is obvious that this is another possible way to get the SAS
2571 * The parent handle method below is a little more reliable, and
2572 * the other benefit is that it works for devices other than SES
2573 * devices. So you can send a SMP request to a da(4) device and it
2574 * will get routed to the expander that device is attached to.
2575 * (Assuming the da(4) device doesn't contain an SMP target...)
2579 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2583 * If we still don't have a SAS address for the expander, look for
2584 * the parent device of this device, which is probably the expander.
2587 #ifdef OLD_MPS_PROBE
2588 struct mpssas_target *parent_target;
2591 if (targ->parent_handle == 0x0) {
2592 mps_printf(sc, "%s: handle %d does not have a valid "
2593 "parent handle!\n", __func__, targ->handle);
2594 ccb->ccb_h.status = CAM_REQ_INVALID;
2597 #ifdef OLD_MPS_PROBE
2598 parent_target = mpssas_find_target_by_handle(sassc, 0,
2599 targ->parent_handle);
2601 if (parent_target == NULL) {
2602 mps_printf(sc, "%s: handle %d does not have a valid "
2603 "parent target!\n", __func__, targ->handle);
2604 ccb->ccb_h.status = CAM_REQ_INVALID;
2608 if ((parent_target->devinfo &
2609 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2610 mps_printf(sc, "%s: handle %d parent %d does not "
2611 "have an SMP target!\n", __func__,
2612 targ->handle, parent_target->handle);
2613 ccb->ccb_h.status = CAM_REQ_INVALID;
2618 sasaddr = parent_target->sasaddr;
2619 #else /* OLD_MPS_PROBE */
2620 if ((targ->parent_devinfo &
2621 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2622 mps_printf(sc, "%s: handle %d parent %d does not "
2623 "have an SMP target!\n", __func__,
2624 targ->handle, targ->parent_handle);
2625 ccb->ccb_h.status = CAM_REQ_INVALID;
2629 if (targ->parent_sasaddr == 0x0) {
2630 mps_printf(sc, "%s: handle %d parent handle %d does "
2631 "not have a valid SAS address!\n",
2632 __func__, targ->handle, targ->parent_handle);
2633 ccb->ccb_h.status = CAM_REQ_INVALID;
2637 sasaddr = targ->parent_sasaddr;
2638 #endif /* OLD_MPS_PROBE */
/* Every resolution path failed: no SMP-capable address for this handle. */
2643 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2644 __func__, targ->handle);
2645 ccb->ccb_h.status = CAM_REQ_INVALID;
2648 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2656 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV handler: queue a SCSI task-management Target Reset (with
 * the link-reset flag, i.e. SAS hard reset / SATA link reset) for the
 * CCB's target.  Completion is handled by mpssas_resetdev_complete().
 * Must be called with the mps mutex held.
 * NOTE(review): "comand" in the log message below is a typo ("command");
 * it is runtime string text, so it is left untouched in this doc pass.
 * (Some source lines are missing from this view.)
 */
2659 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2661 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2662 struct mps_softc *sc;
2663 struct mps_command *tm;
2664 struct mpssas_target *targ;
2666 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2667 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2670 tm = mps_alloc_command(sc)
2672 mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2673 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2678 targ = &sassc->targets[ccb->ccb_h.target_id];
2679 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2680 req->DevHandle = targ->handle;
2681 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2682 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2684 /* SAS Hard Link Reset / SATA Link Reset */
2685 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2688 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2689 tm->cm_complete = mpssas_resetdev_complete;
2690 tm->cm_complete_data = ccb;
2691 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management command issued
 * by mpssas_action_resetdev().  Translates the TM response code into a CAM
 * status on the stashed CCB, announces the reset (AC_SENT_BDR) on success,
 * and frees the TM command.  Must run with the mps mutex held.
 * (NOTE(review): some source lines are missing from this view.)
 */
2695 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
2697 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2700 mps_dprint(sc, MPS_TRACE, __func__);
2701 mtx_assert(&sc->mps_mtx, MA_OWNED);
2703 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2704 ccb = tm->cm_complete_data;
2707 * Currently there should be no way we can hit this case. It only
2708 * happens when we have a failure to allocate chain frames, and
2709 * task management commands don't have S/G lists.
2711 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2712 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2714 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2716 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
2717 "This should not happen!\n", __func__, tm->cm_flags,
2719 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2723 printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2724 resp->IOCStatus, resp->ResponseCode);
2726 if (resp->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2727 ccb->ccb_h.status = CAM_REQ_CMP;
2728 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2732 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2736 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine (used when interrupts are unavailable, e.g. during
 * kernel dumps/panics).  Disables MPS_TRACE debugging first so per-command
 * trace output does not drown the polled path, then runs the interrupt
 * handler directly.
 */
2741 mpssas_poll(struct cam_sim *sim)
2743 struct mpssas_softc *sassc;
2745 sassc = cam_sim_softc(sim);
2747 if (sassc->sc->mps_debug & MPS_TRACE) {
2748 /* frequent debug messages during a panic just slow
2749 * everything down too much.
2751 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
2752 sassc->sc->mps_debug &= ~MPS_TRACE;
2755 mps_intr_locked(sassc->sc);
/*
 * Callback invoked when a queued bus rescan CCB finishes.  Logs the path,
 * releases the path and CCB, and (on pre-1000006 FreeBSD) kicks off the
 * EEDP probe of existing targets before the scan is considered complete.
 * Runs with the mps mutex held.
 * (NOTE(review): some source lines are missing from this view.)
 */
2759 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
2761 struct mpssas_softc *sassc;
2764 if (done_ccb == NULL)
2767 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
2769 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2771 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
2772 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
2774 xpt_free_path(done_ccb->ccb_h.path);
2775 xpt_free_ccb(done_ccb);
2777 #if __FreeBSD_version < 1000006
2779 * Before completing scan, get EEDP stuff for all of the existing
2782 mpssas_check_eedp(sassc);
2787 /* thread to handle bus rescans */
/*
 * Kernel thread body: sleeps on ccb_scanq until woken by mpssas_rescan(),
 * drains queued rescan CCBs, and exits when MPSSAS_SHUTDOWN is set,
 * clearing MPSSAS_SCANTHREAD and waking any waiter on sassc->flags.
 * (NOTE(review): some source lines are missing from this view — e.g. the
 * loop construct and the xpt_action() call on each dequeued CCB are not
 * visible.)
 */
2789 mpssas_scanner_thread(void *arg)
2791 struct mpssas_softc *sassc;
2792 struct mps_softc *sc;
2795 sassc = (struct mpssas_softc *)arg;
2798 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2802 msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
2804 if (sassc->flags & MPSSAS_SHUTDOWN) {
2805 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
2808 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
2811 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2815 sassc->flags &= ~MPSSAS_SCANTHREAD;
2816 wakeup(&sassc->flags);
2818 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
/*
 * Queue a rescan CCB for the scanner thread: stash the softc in
 * ppriv_ptr1, set mpssas_rescan_done as the completion callback, append
 * the CCB to ccb_scanq, and wake mpssas_scanner_thread().  Must be called
 * with the mps mutex held.
 */
2823 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
2827 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
2829 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2834 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2835 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
2837 /* Prepare request */
2838 ccb->ccb_h.ppriv_ptr1 = sassc;
2839 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
2840 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
2841 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
2842 wakeup(&sassc->ccb_scanq);
2845 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (FreeBSD >= 1000006 path).  For
 * AC_ADVINFO_CHANGED with buftype CDAI_TYPE_RCAPLONG on this SIM's bus,
 * fetch the cached long read-capacity data via an XPT_DEV_ADVINFO CCB and
 * record per-LUN EEDP (protection) formatting state, allocating a LUN list
 * entry if one does not exist yet.
 * (NOTE(review): some source lines are missing from this view — e.g. the
 * switch statement head and several closing braces are not visible.)
 */
2847 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2850 struct mps_softc *sc;
2852 sc = (struct mps_softc *)callback_arg;
2855 case AC_ADVINFO_CHANGED: {
2856 struct mpssas_target *target;
2857 struct mpssas_softc *sassc;
2858 struct scsi_read_capacity_data_long rcap_buf;
2859 struct ccb_dev_advinfo cdai;
2860 struct mpssas_lun *lun;
2865 buftype = (uintptr_t)arg;
2871 * We're only interested in read capacity data changes.
2873 if (buftype != CDAI_TYPE_RCAPLONG)
2877 * We're only interested in devices that are attached to
2880 if (xpt_path_path_id(path) != sassc->sim->path_id)
2884 * We should have a handle for this, but check to make sure.
2886 target = &sassc->targets[xpt_path_target_id(path)];
2887 if (target->handle == 0)
2890 lunid = xpt_path_lun_id(path);
/* Look for an existing LUN entry; create one if this LUN is new. */
2892 SLIST_FOREACH(lun, &target->luns, lun_link) {
2893 if (lun->lun_id == lunid) {
2899 if (found_lun == 0) {
2900 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
2903 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
2904 "LUN for EEDP support.\n");
2907 lun->lun_id = lunid;
2908 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Pull the cached long read-capacity data from the transport layer. */
2911 bzero(&rcap_buf, sizeof(rcap_buf));
2912 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
2913 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
2914 cdai.ccb_h.flags = CAM_DIR_IN;
2915 cdai.buftype = CDAI_TYPE_RCAPLONG;
2917 cdai.bufsiz = sizeof(rcap_buf);
2918 cdai.buf = (uint8_t *)&rcap_buf;
2919 xpt_action((union ccb *)&cdai);
2920 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
2921 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set means the medium is formatted with protection (EEDP). */
2924 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
2925 && (rcap_buf.prot & SRC16_PROT_EN)) {
2926 lun->eedp_formatted = TRUE;
2927 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
2929 lun->eedp_formatted = FALSE;
2930 lun->eedp_block_size = 0;
2938 #else /* __FreeBSD_version >= 1000006 */
/*
 * Pre-1000006 EEDP discovery: walk every target and (per the inner do/while
 * on found_periph) every existing LUN, and issue a READ CAPACITY (16) whose
 * completion handler, mpssas_read_cap_done(), records the LUN's EEDP
 * formatting state.  LUN existence is detected via cam_periph_find(); new
 * LUNs get an entry on the target's LUN list.
 * (NOTE(review): many source lines are missing from this view — the LUN id
 * loop control, xpt_action() submission, and several braces are not
 * visible.)
 */
2941 mpssas_check_eedp(struct mpssas_softc *sassc)
2943 struct mps_softc *sc = sassc->sc;
2944 struct ccb_scsiio *csio;
2945 struct scsi_read_capacity_16 *scsi_cmd;
2946 struct scsi_read_capacity_eedp *rcap_buf;
2948 path_id_t pathid = cam_sim_path(sassc->sim);
2949 target_id_t targetid;
2951 struct cam_periph *found_periph;
2952 struct mpssas_target *target;
2953 struct mpssas_lun *lun;
2957 * Issue a READ CAPACITY 16 command to each LUN of each target. This
2958 * info is used to determine if the LUN is formatted for EEDP support.
2960 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
2961 target = &sassc->targets[targetid];
2962 if (target->handle == 0x0) {
2969 malloc(sizeof(struct scsi_read_capacity_eedp),
2970 M_MPT2, M_NOWAIT | M_ZERO);
2971 if (rcap_buf == NULL) {
2972 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
2973 "capacity buffer for EEDP support.\n");
2976 ccb = xpt_alloc_ccb_nowait();
2978 mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
2979 "for EEDP support.\n");
2980 free(rcap_buf, M_MPT2);
2984 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2985 pathid, targetid, lunid) != CAM_REQ_CMP) {
2986 mps_dprint(sc, MPS_FAULT, "Unable to create "
2987 "path for EEDP support\n");
2988 free(rcap_buf, M_MPT2);
2994 * If a periph is returned, the LUN exists. Create an
2995 * entry in the target's LUN list.
2997 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
3000 * If LUN is already in list, don't create a new
3004 SLIST_FOREACH(lun, &target->luns, lun_link) {
3005 if (lun->lun_id == lunid) {
/*
 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so the
 * failure path below (visible at 3014-3018) appears unreachable —
 * confirm against the full source.
 */
3011 lun = malloc(sizeof(struct mpssas_lun),
3012 M_MPT2, M_WAITOK | M_ZERO);
3014 mps_dprint(sc, MPS_FAULT,
3015 "Unable to alloc LUN for "
3017 free(rcap_buf, M_MPT2);
3018 xpt_free_path(ccb->ccb_h.path);
3022 lun->lun_id = lunid;
3023 SLIST_INSERT_HEAD(&target->luns, lun,
3029 * Issue a READ CAPACITY 16 command for the LUN.
3030 * The mpssas_read_cap_done function will load
3031 * the read cap info into the LUN struct.
3034 csio->ccb_h.func_code = XPT_SCSI_IO;
3035 csio->ccb_h.flags = CAM_DIR_IN;
3036 csio->ccb_h.retry_count = 4;
3037 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3038 csio->ccb_h.timeout = 60000;
3039 csio->data_ptr = (uint8_t *)rcap_buf;
3040 csio->dxfer_len = sizeof(struct
3041 scsi_read_capacity_eedp);
3042 csio->sense_len = MPS_SENSE_LEN;
3043 csio->cdb_len = sizeof(*scsi_cmd);
3044 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY (16) CDB: opcode 0x9E / SAI_READ_CAPACITY_16. */
3046 scsi_cmd = (struct scsi_read_capacity_16 *)
3047 &csio->cdb_io.cdb_bytes;
3048 bzero(scsi_cmd, sizeof(*scsi_cmd));
3049 scsi_cmd->opcode = 0x9E;
3050 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3051 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3052 scsi_read_capacity_eedp);
3055 * Set the path, target and lun IDs for the READ
3058 ccb->ccb_h.path_id =
3059 xpt_path_path_id(ccb->ccb_h.path);
3060 ccb->ccb_h.target_id =
3061 xpt_path_target_id(ccb->ccb_h.path);
3062 ccb->ccb_h.target_lun =
3063 xpt_path_lun_id(ccb->ccb_h.path);
3065 ccb->ccb_h.ppriv_ptr1 = sassc;
/* LUN does not exist: release the probe resources and stop iterating. */
3068 free(rcap_buf, M_MPT2);
3069 xpt_free_path(ccb->ccb_h.path);
3072 } while (found_periph);
/*
 * Completion callback for the READ CAPACITY (16) issued by
 * mpssas_check_eedp().  Locates the matching LUN on the target's LUN list
 * and records whether the LUN is EEDP-formatted (PROT_EN bit in the
 * returned data) plus its block size, then frees the data buffer, path,
 * and CCB.  (NOTE(review): some source lines are missing from this view.)
 */
3078 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3080 struct mpssas_softc *sassc;
3081 struct mpssas_target *target;
3082 struct mpssas_lun *lun;
3083 struct scsi_read_capacity_eedp *rcap_buf;
3085 if (done_ccb == NULL)
3088 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3091 * Get the LUN ID for the path and look it up in the LUN list for the
3094 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3095 target = &sassc->targets[done_ccb->ccb_h.target_id];
3096 SLIST_FOREACH(lun, &target->luns, lun_link) {
3097 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3101 * Got the LUN in the target's LUN list. Fill it in
3102 * with EEDP info. If the READ CAP 16 command had some
3103 * SCSI error (common if command is not supported), mark
3104 * the lun as not supporting EEDP and set the block size
3107 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3108 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3109 lun->eedp_formatted = FALSE;
3110 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: medium formatted with protection. */
3114 if (rcap_buf->protect & 0x01) {
3115 lun->eedp_formatted = TRUE;
3116 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3121 // Finished with this CCB and path.
3122 free(rcap_buf, M_MPT2);
3123 xpt_free_path(done_ccb->ccb_h.path);
3124 xpt_free_ccb(done_ccb);
3126 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS topology discovery: bump the startup refcount (keeps the
 * simq frozen until discovery finishes), set wait_for_port_enable, and
 * send the firmware Port Enable request.
 */
3129 mpssas_startup(struct mps_softc *sc)
3131 struct mpssas_softc *sassc;
3134 * Send the port enable message and set the wait_for_port_enable flag.
3135 * This flag helps to keep the simq frozen until all discovery events
3139 mpssas_startup_increment(sassc);
3140 sc->wait_for_port_enable = 1;
3141 mpssas_send_portenable(sc);
/*
 * Build and submit an MPI2 Port Enable request to the firmware.
 * Completion is handled asynchronously by mpssas_portenable_complete().
 * (NOTE(review): some source lines are missing from this view — e.g. the
 * error return when command allocation fails is not visible.)
 */
3146 mpssas_send_portenable(struct mps_softc *sc)
3148 MPI2_PORT_ENABLE_REQUEST *request;
3149 struct mps_command *cm;
3151 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3153 if ((cm = mps_alloc_command(sc)) == NULL)
3155 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3156 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3157 request->MsgFlags = 0;
3159 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3160 cm->cm_complete = mpssas_portenable_complete;
3164 mps_map_command(sc, cm);
3165 mps_dprint(sc, MPS_TRACE,
3166 "mps_send_portenable finished cm %p req %p complete %p\n",
3167 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the Port Enable request.  Checks the reply
 * status, tears down the boot-time config intrhook, fetches WarpDrive
 * config pages and optionally hides physical disks behind a volume, then
 * drops the startup refcount and releases the explicitly-frozen simq.
 * (NOTE(review): some source lines are missing from this view, including
 * the function's closing lines after the xpt_release_simq() call.)
 */
3172 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3174 MPI2_PORT_ENABLE_REPLY *reply;
3175 struct mpssas_softc *sassc;
3176 struct mpssas_target *target;
3179 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3183 * Currently there should be no way we can hit this case. It only
3184 * happens when we have a failure to allocate chain frames, and
3185 * port enable commands don't have S/G lists.
3187 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3188 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3189 "This should not happen!\n", __func__, cm->cm_flags);
3192 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3194 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3195 else if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3196 MPI2_IOCSTATUS_SUCCESS)
3197 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3199 mps_free_command(sc, cm);
/* Discovery done: the boot-time config intrhook is no longer needed. */
3200 if (sc->mps_ich.ich_arg != NULL) {
3201 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3202 config_intrhook_disestablish(&sc->mps_ich);
3203 sc->mps_ich.ich_arg = NULL;
3207 * Get WarpDrive info after discovery is complete but before the scan
3208 * starts. At this point, all devices are ready to be exposed to the
3209 * OS. If devices should be hidden instead, take them out of the
3210 * 'targets' array before the scan. The devinfo for a disk will have
3211 * some info and a volume's will be 0. Use that to remove disks.
3213 mps_wd_config_pages(sc);
3214 if (((sc->mps_flags & MPS_FLAGS_WD_AVAILABLE)
3215 && (sc->WD_hide_expose == MPS_WD_HIDE_ALWAYS))
3216 || (sc->WD_valid_config && (sc->WD_hide_expose ==
3217 MPS_WD_HIDE_IF_VOLUME))) {
3218 for (i = 0; i < sassc->sc->facts->MaxTargets; i++) {
3219 target = &sassc->targets[i];
/* Non-zero devinfo identifies a physical disk; clear it so it stays hidden. */
3220 if (target->devinfo) {
3221 target->devinfo = 0x0;
3222 target->encl_handle = 0x0;
3223 target->encl_slot = 0x0;
3224 target->handle = 0x0;
3226 target->linkrate = 0x0;
3227 target->flags = 0x0;
3233 * Done waiting for port enable to complete. Decrement the refcount.
3234 * If refcount is 0, discovery is complete and a rescan of the bus can
3235 * take place. Since the simq was explicitly frozen before port
3236 * enable, it must be explicitly released here to keep the
3237 * freeze/release count in sync.
3239 sc->wait_for_port_enable = 0;
3240 sc->port_enable_complete = 1;
3241 mpssas_startup_decrement(sassc);
3242 xpt_release_simq(sassc->sim, 1);