2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP (end-to-end data protection) flag lookup table, indexed by SCSI CDB
 * opcode (0x00-0xFF, 16 entries per row).  Non-zero entries mark the
 * READ (PRO_R), WRITE (PRO_W) and WRITE-AND-VERIFY (PRO_V) opcodes that
 * need protection-information handling; all other opcodes map to 0.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk of the file.
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Row 0x20-0x2F: READ(10)=0x28, WRITE(10)=0x2A, WRITE&VERIFY(10)=0x2E/0x2F */
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Row 0x40-0x4F: 0x41 (WRITE SAME(10), presumably) gets insert flags */
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Row 0x80-0x8F: READ(16)=0x88, WRITE(16)=0x8A, WRITE&VERIFY(16)=0x8E/0x8F */
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* Row 0xA0-0xAF: READ(12)=0xA8, WRITE(12)=0xAA, WRITE&VERIFY(12)=0xAE/0xAF */
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126 struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb);
140 static void mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb);
141 static void mpssas_scanner_thread(void *arg);
142 #if __FreeBSD_version >= 1000006
143 static void mpssas_async(void *callback_arg, uint32_t code,
144 struct cam_path *path, void *arg);
146 static void mpssas_check_eedp(struct mpssas_softc *sassc);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151 struct mps_command *cm);
/*
 * Linear search of the target array for the entry whose firmware device
 * handle matches 'handle', beginning at index 'start'.  Returns a pointer
 * to the matching mpssas_target; the return statements are outside the
 * visible portion of this chunk.
 */
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
156 struct mpssas_target *target;
/* Scan every slot up to the controller-reported maximum target count. */
159 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
/*
 * Bump the startup/discovery refcount.  On the 0 -> 1 transition the SIMQ
 * is frozen so CAM cannot issue commands before discovery has populated
 * the device handles.  Only active while MPSSAS_IN_STARTUP is set.
 */
176 mpssas_startup_increment(struct mpssas_softc *sassc)
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INFO,
182 "%s freezing simq\n", __func__);
183 xpt_freeze_simq(sassc->sim, 1);
185 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
186 sassc->startup_refcount);
/*
 * Drop the startup/discovery refcount.  When the last reference goes away
 * the startup phase is declared over: clear MPSSAS_IN_STARTUP, release the
 * SIMQ frozen by mpssas_startup_increment(), and rescan the whole bus
 * (NULL target) to pick up the discovered topology.
 */
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
193 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
194 if (--sassc->startup_refcount == 0) {
195 /* finished all discovery-related actions, release
196 * the simq and rescan for the latest topology.
198 mps_dprint(sassc->sc, MPS_INFO,
199 "%s releasing simq\n", __func__);
200 sassc->flags &= ~MPSSAS_IN_STARTUP;
201 xpt_release_simq(sassc->sim, 1);
202 mpssas_rescan_target(sassc->sc, NULL);
204 mps_dprint(sassc->sc, MPS_TRACE, "%s refcount %u\n", __func__,
205 sassc->startup_refcount);
/* LSI's firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 *
 * Allocate a high-priority command frame for a task-management request.
 * The first outstanding TM (tm_count 0 -> 1) freezes the SIMQ so no new
 * I/O is issued while task management is in flight.
 * NOTE(review): the tm == NULL handling, if any, is not visible here.
 */
214 mpssas_alloc_tm(struct mps_softc *sc)
216 struct mps_command *tm;
218 tm = mps_alloc_high_priority_command(sc);
220 if (sc->sassc->tm_count++ == 0) {
221 mps_printf(sc, "%s freezing simq\n", __func__);
222 xpt_freeze_simq(sc->sassc->sim, 1);
224 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
225 sc->sassc->tm_count);
/*
 * Release a task-management command allocated by mpssas_alloc_tm().
 * Decrements the driver's own TM refcount and, when it reaches zero,
 * releases the SIMQ that the first TM froze.
 */
231 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
236 /* if there are no TMs in use, we can release the simq. We use our
237 * own refcount so that it's easier for a diag reset to cleanup and
240 if (--sc->sassc->tm_count == 0) {
241 mps_printf(sc, "%s releasing simq\n", __func__);
242 xpt_release_simq(sc->sassc->sim, 1);
244 mps_dprint(sc, MPS_TRACE, "%s tm_count %u\n", __func__,
245 sc->sassc->tm_count);
/* Return the frame to the high-priority command pool. */
247 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of a single target, or of the whole bus when
 * 'targ' is NULL (target id becomes CAM_TARGET_WILDCARD).  Allocates a
 * CCB, builds a wildcard-LUN path to the target, and hands it to the
 * driver's scanner thread via mpssas_rescan().
 */
252 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
254 struct mpssas_softc *sassc = sc->sassc;
256 target_id_t targetid;
259 pathid = cam_sim_path(sassc->sim);
261 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of 'targ' within the targets array. */
263 targetid = targ - sassc->targets;
266 * Allocate a CCB and schedule a rescan.
/* Non-sleeping allocation: this can be called from completion context. */
268 ccb = xpt_alloc_ccb_nowait();
270 mps_dprint(sc, MPS_FAULT, "unable to alloc CCB for rescan\n");
274 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
275 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
276 mps_dprint(sc, MPS_FAULT, "unable to create path for rescan\n");
/* Full-bus scan for the wildcard, single-target scan otherwise. */
281 if (targetid == CAM_TARGET_WILDCARD)
282 ccb->ccb_h.func_code = XPT_SCAN_BUS;
284 ccb->ccb_h.func_code = XPT_SCAN_TGT;
286 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
287 mpssas_rescan(sassc, ccb);
/*
 * printf-style logging helper that prefixes the message with as much CAM
 * context as is available for 'cm': the CCB's path and CDB when a CCB is
 * attached, otherwise a synthesized "(noperiph:...)" sim/bus/target tuple,
 * always followed by the request's SMID.  Output goes to the console via
 * printf().  The sbuf is built on a local stack buffer ('str').
 */
291 mpssas_log_command(struct mps_command *cm, const char *fmt, ...)
301 sbuf_new(&sb, str, sizeof(str), 0);
305 if (cm->cm_ccb != NULL) {
306 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
308 sbuf_cat(&sb, path_str);
/* For SCSI I/O, include the decoded CDB and transfer length. */
309 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
310 scsi_command_string(&cm->cm_ccb->csio, &sb);
311 sbuf_printf(&sb, "length %d ",
312 cm->cm_ccb->csio.dxfer_len);
/* No CCB: fall back to sim identity; 0xFFFFFFFF marks "no target". */
316 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
317 cam_sim_name(cm->cm_sc->sassc->sim),
318 cam_sim_unit(cm->cm_sc->sassc->sim),
319 cam_sim_bus(cm->cm_sc->sassc->sim),
320 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
324 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
325 sbuf_vprintf(&sb, fmt, ap);
327 printf("%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, no
 * SAS_OP_REMOVE_DEVICE follow-up is needed for a volume; on success the
 * target's enclosure/handle state is cleared directly.  The TM is freed
 * on every exit path.
 * NOTE(review): "reseting" in the log string is a typo ("resetting");
 * left untouched here since it is runtime output, not a comment.
 */
334 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
336 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
337 struct mpssas_target *targ;
340 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
342 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* The handle was stashed as complete_data by the TM submitter. */
343 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* A NULL reply most likely means a diag reset ate the completion. */
347 /* XXX retry the remove after the diag reset completes? */
348 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
350 mpssas_free_tm(sc, tm);
354 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
355 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
356 reply->IOCStatus, handle);
357 mpssas_free_tm(sc, tm);
361 mps_printf(sc, "Reset aborted %u commands\n", reply->TerminationCount);
/* Free the reply frame now; clear the pointer so free_tm won't re-free. */
362 mps_free_reply(sc, tm->cm_reply_data);
363 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
365 mps_printf(sc, "clearing target %u handle 0x%04x\n", targ->tid, handle);
368 * Don't clear target if remove fails because things will get confusing.
369 * Leave the devname and sasaddr intact so that we know to avoid reusing
370 * this target id if possible, and so we can assign the same target id
371 * to this device if it comes back in the future.
373 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
376 targ->encl_handle = 0x0;
377 targ->encl_slot = 0x0;
378 targ->exp_dev_handle = 0x0;
380 targ->linkrate = 0x0;
385 mpssas_free_tm(sc, tm);
390 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
391 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by its firmware handle:
 * mark the target MPSSAS_TARGET_INREMOVAL, kick off a CAM rescan, and
 * send a target-reset TM whose completion (mpssas_remove_volume) clears
 * the target state.  WD ("WarpDrive") controllers configured to always
 * expose disks skip the removal entirely.
 * NOTE(review): unlike mpssas_prepare_remove(), the request frame is not
 * memset() to zero and DevHandle is not converted with htole16() — verify
 * whether this is an endianness/stale-data bug on big-endian hosts.
 */
394 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
396 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
397 struct mps_softc *sc;
398 struct mps_command *cm;
399 struct mpssas_target *targ = NULL;
401 mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
406 * If this is a WD controller, determine if the disk should be exposed
407 * to the OS or not. If disk should be exposed, return from this
408 * function without doing anything.
410 if (sc->WD_available && (sc->WD_hide_expose ==
411 MPS_WD_EXPOSE_ALWAYS)) {
416 targ = mpssas_find_target_by_handle(sassc, 0, handle);
418 /* FIXME: what is the action? */
419 /* We don't know about this device? */
420 printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
424 targ->flags |= MPSSAS_TARGET_INREMOVAL;
/* TM frame comes from the high-priority pool; freezes SIMQ if first TM. */
426 cm = mpssas_alloc_tm(sc);
428 mps_printf(sc, "%s: command alloc failure\n", __func__);
432 mpssas_rescan_target(sc, targ);
434 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
435 req->DevHandle = targ->handle;
436 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
437 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
439 /* SAS Hard Link Reset / SATA Link Reset */
440 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
444 cm->cm_desc.HighPriority.RequestFlags =
445 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
/* Completion path clears target state; handle is smuggled via a cast. */
446 cm->cm_complete = mpssas_remove_volume;
447 cm->cm_complete_data = (void *)(uintptr_t)handle;
448 mps_map_command(sc, cm);
452 * The MPT2 firmware performs debounce on the link to avoid transient link
453 * errors and false removals. When it does decide that link has been lost
454 * and a device need to go away, it expects that the host will perform a
455 * target reset and then an op remove. The reset has the side-effect of
456 * aborting any outstanding requests for the device, which is required for
457 * the op-remove to succeed. It's not clear if the host should check for
458 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device: mark the target INREMOVAL, rescan it,
 * and issue a target-reset TM.  Firmware requires the reset (which aborts
 * all outstanding I/O for the device) before the subsequent
 * SAS_OP_REMOVE_DEVICE can succeed; the follow-up is chained from the
 * completion handler mpssas_remove_device().
 */
461 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
463 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
464 struct mps_softc *sc;
465 struct mps_command *cm;
466 struct mpssas_target *targ = NULL;
468 mps_dprint(sassc->sc, MPS_INFO, "%s\n", __func__);
472 targ = mpssas_find_target_by_handle(sassc, 0, handle);
474 /* FIXME: what is the action? */
475 /* We don't know about this device? */
476 printf("%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
480 targ->flags |= MPSSAS_TARGET_INREMOVAL;
482 cm = mpssas_alloc_tm(sc);
484 mps_printf(sc, "%s: command alloc failure\n", __func__);
488 mpssas_rescan_target(sc, targ);
/* Build the TM request from a zeroed frame; DevHandle is little-endian. */
490 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
491 memset(req, 0, sizeof(*req));
492 req->DevHandle = htole16(targ->handle);
493 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
494 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
496 /* SAS Hard Link Reset / SATA Link Reset */
497 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
501 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
/* On completion, mpssas_remove_device() sends the op-remove. */
502 cm->cm_complete = mpssas_remove_device;
503 cm->cm_complete_data = (void *)(uintptr_t)handle;
504 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  On success it reuses the same command frame
 * to issue the SAS IO UNIT CONTROL "remove device" operation, then
 * completes any commands still queued on the target with
 * CAM_DEV_NOT_THERE.  On any failure path the TM is freed and the
 * removal is abandoned.
 */
508 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
510 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
511 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
512 struct mpssas_target *targ;
513 struct mps_command *next_cm;
516 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
518 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
519 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
523 * Currently there should be no way we can hit this case. It only
524 * happens when we have a failure to allocate chain frames, and
525 * task management commands don't have S/G lists.
527 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
528 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
529 "This should not happen!\n", __func__, tm->cm_flags,
531 mpssas_free_tm(sc, tm);
/* NULL reply: the completion was most likely consumed by a diag reset. */
536 /* XXX retry the remove after the diag reset completes? */
537 mps_printf(sc, "%s NULL reply reseting device 0x%04x\n",
539 mpssas_free_tm(sc, tm);
543 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
544 mps_printf(sc, "IOCStatus = 0x%x while resetting device 0x%x\n",
545 le16toh(reply->IOCStatus), handle);
546 mpssas_free_tm(sc, tm);
550 mps_dprint(sc, MPS_INFO, "Reset aborted %u commands\n",
551 le32toh(reply->TerminationCount));
/* Free the reply frame and clear the pointer so it isn't re-freed. */
552 mps_free_reply(sc, tm->cm_reply_data);
553 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
555 /* Reuse the existing command */
556 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
557 memset(req, 0, sizeof(*req));
558 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
559 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
560 req->DevHandle = htole16(handle);
562 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
563 tm->cm_complete = mpssas_remove_complete;
564 tm->cm_complete_data = (void *)(uintptr_t)handle;
566 mps_map_command(sc, tm);
568 mps_dprint(sc, MPS_INFO, "clearing target %u handle 0x%04x\n",
/* Fail any commands the reset missed; safe-iterate since completion
 * removes entries from the target's command list. */
570 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
573 mps_dprint(sc, MPS_INFO, "Completing missed command %p\n", tm);
574 ccb = tm->cm_complete_data;
575 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
576 mpssas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE issued by
 * mpssas_remove_device().  On success, clears the target's handle/
 * enclosure state (devname and sasaddr are deliberately preserved — see
 * comment below) and frees any per-LUN bookkeeping.  Always frees the TM.
 */
581 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
583 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
585 struct mpssas_target *targ;
586 struct mpssas_lun *lun;
588 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
590 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
591 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
594 * Currently there should be no way we can hit this case. It only
595 * happens when we have a failure to allocate chain frames, and
596 * task management commands don't have S/G lists.
598 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
599 mps_printf(sc, "%s: cm_flags = %#x for remove of handle %#04x! "
600 "This should not happen!\n", __func__, tm->cm_flags,
602 mpssas_free_tm(sc, tm);
607 /* most likely a chip reset */
608 mps_printf(sc, "%s NULL reply removing device 0x%04x\n",
610 mpssas_free_tm(sc, tm);
614 mps_printf(sc, "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
615 handle, le16toh(reply->IOCStatus));
618 * Don't clear target if remove fails because things will get confusing.
619 * Leave the devname and sasaddr intact so that we know to avoid reusing
620 * this target id if possible, and so we can assign the same target id
621 * to this device if it comes back in the future.
623 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
626 targ->encl_handle = 0x0;
627 targ->encl_slot = 0x0;
628 targ->exp_dev_handle = 0x0;
630 targ->linkrate = 0x0;
/* Drain and free the per-LUN list head by head. */
634 while(!SLIST_EMPTY(&targ->luns)) {
635 lun = SLIST_FIRST(&targ->luns);
636 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
642 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask (SAS device/discovery/topology, IR
 * volume/disk/config, log entries) and register mpssas_evt_handler as the
 * callback for all of them.  The handle is stored in sassc->mpssas_eh for
 * later deregistration in mps_detach_sas().
 */
646 mpssas_register_events(struct mps_softc *sc)
648 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
651 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
652 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
653 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
654 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
655 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
656 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
657 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
658 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
659 setbit(events, MPI2_EVENT_IR_VOLUME);
660 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
661 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
662 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
664 mps_register_events(sc, events, mpssas_evt_handler, NULL,
665 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the softc and target array, create
 * the SIMQ/SIM, start the event taskqueue and rescan kthread, register
 * the SCSI bus with CAM, freeze the SIMQ until discovery finishes, and
 * hook up async notifications and firmware events.
 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL on FreeBSD,
 * so the NULL checks after both allocations below are dead code.
 */
671 mps_attach_sas(struct mps_softc *sc)
673 struct mpssas_softc *sassc;
674 #if __FreeBSD_version >= 1000006
679 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
681 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
683 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
687 sassc->targets = malloc(sizeof(struct mpssas_target) *
688 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
689 if(!sassc->targets) {
690 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
/* One SIMQ slot per request frame so CAM can't overcommit the HBA. */
698 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
699 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIMQ\n");
704 unit = device_get_unit(sc->mps_dev);
705 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
706 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
707 if (sassc->sim == NULL) {
708 mps_dprint(sc, MPS_FAULT, "Cannot allocate SIM\n");
713 TAILQ_INIT(&sassc->ev_queue);
715 /* Initialize taskqueue for Event Handling */
716 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
717 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
718 taskqueue_thread_enqueue, &sassc->ev_tq);
720 /* Run the task queue with lowest priority */
721 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
722 device_get_nameunit(sc->mps_dev));
/* Dedicated kthread that services queued rescan CCBs. */
724 TAILQ_INIT(&sassc->ccb_scanq);
725 error = mps_kproc_create(mpssas_scanner_thread, sassc,
726 &sassc->rescan_thread, 0, 0, "mps_scan%d", unit);
728 mps_printf(sc, "Error %d starting rescan thread\n", error);
733 sassc->flags |= MPSSAS_SCANTHREAD;
736 * XXX There should be a bus for every port on the adapter, but since
737 * we're just going to fake the topology for now, we'll pretend that
738 * everything is just a target on a single bus.
740 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
741 mps_dprint(sc, MPS_FAULT, "Error %d registering SCSI bus\n",
748 * Assume that discovery events will start right away. Freezing
749 * the simq will prevent the CAM boottime scanner from running
750 * before discovery is complete.
752 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
753 xpt_freeze_simq(sassc->sim, 1);
754 sc->sassc->startup_refcount = 0;
756 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
757 sassc->discovery_timeouts = 0;
761 #if __FreeBSD_version >= 1000006
762 status = xpt_register_async(AC_ADVINFO_CHANGED, mpssas_async, sc, NULL);
763 if (status != CAM_REQ_CMP) {
764 mps_printf(sc, "Error %#x registering async handler for "
765 "AC_ADVINFO_CHANGED events\n", status);
771 mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of mps_attach_sas(): deregister
 * firmware events, drain/free the event taskqueue (lock released around
 * it to avoid deadlock), unwind the async handler, thaw the SIMQ if
 * startup never finished, destroy the SIM and bus registration, stop the
 * scanner thread, and free the SIMQ and target array.
 */
779 mps_detach_sas(struct mps_softc *sc)
781 struct mpssas_softc *sassc;
782 struct mpssas_lun *lun, *lun_tmp;
783 struct mpssas_target *targ;
786 mps_dprint(sc, MPS_INFO, "%s\n", __func__);
/* Nothing to do if attach never got far enough to allocate the softc. */
788 if (sc->sassc == NULL)
792 mps_deregister_events(sc, sassc->mpssas_eh);
795 * Drain and free the event handling taskqueue with the lock
796 * unheld so that any parallel processing tasks drain properly
797 * without deadlocking.
799 if (sassc->ev_tq != NULL)
800 taskqueue_free(sassc->ev_tq);
802 /* Make sure CAM doesn't wedge if we had to bail out early. */
805 /* Deregister our async handler */
806 #if __FreeBSD_version >= 1000006
807 xpt_register_async(0, mpssas_async, sc, NULL);
/* Balance the freeze taken at attach if discovery never completed. */
810 if (sassc->flags & MPSSAS_IN_STARTUP)
811 xpt_release_simq(sassc->sim, 1);
813 if (sassc->sim != NULL) {
814 xpt_bus_deregister(cam_sim_path(sassc->sim));
815 cam_sim_free(sassc->sim, FALSE);
/* Signal the scanner thread to exit, then wait up to 30s for it. */
818 if (sassc->flags & MPSSAS_SCANTHREAD) {
819 sassc->flags |= MPSSAS_SHUTDOWN;
820 wakeup(&sassc->ccb_scanq);
822 if (sassc->flags & MPSSAS_SCANTHREAD) {
823 msleep(&sassc->flags, &sc->mps_mtx, PRIBIO,
824 "mps_shutdown", 30 * hz);
829 mps_dprint(sc, MPS_INFO, "%s:%d\n", __func__,__LINE__);
830 if (sassc->devq != NULL)
831 cam_simq_free(sassc->devq);
/* Release any per-LUN records still attached to each target slot. */
833 for(i=0; i< sc->facts->MaxTargets ;i++) {
834 targ = &sassc->targets[i];
835 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
839 free(sassc->targets, M_MPT2);
/*
 * Discovery has finished: cancel the pending discovery-timeout callout,
 * if one was scheduled (MPSSAS_DISCOVERY_TIMEOUT_PENDING).
 */
847 mpssas_discovery_end(struct mpssas_softc *sassc)
849 struct mps_softc *sc = sassc->sc;
851 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
853 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
854 callout_stop(&sassc->discovery_callout);
/*
 * Callout fired when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds — usually a sign that interrupts are
 * not being delivered.  Polls the hardware for events, and either ends
 * discovery (if it completed during the poll), re-arms the timeout (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS times), or gives up and forces discovery
 * to end.
 * NOTE(review): 'sc' is used below before any visible assignment — the
 * "sc = sassc->sc" line appears to be missing from this chunk; confirm
 * against the full source.
 */
859 mpssas_discovery_timeout(void *data)
861 struct mpssas_softc *sassc = data;
862 struct mps_softc *sc;
865 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
869 "Timeout waiting for discovery, interrupts may not be working!\n");
870 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
872 /* Poll the hardware for events in case interrupts aren't working */
875 mps_printf(sassc->sc,
876 "Finished polling after discovery timeout at %d\n", ticks);
878 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
879 mpssas_discovery_end(sassc);
/* Still discovering: retry with the same timeout until the cap. */
881 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
882 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
883 callout_reset(&sassc->discovery_callout,
884 MPSSAS_DISCOVERY_TIMEOUT * hz,
885 mpssas_discovery_timeout, sassc);
886 sassc->discovery_timeouts++;
/* Retries exhausted: declare discovery over and move on. */
888 mps_dprint(sassc->sc, MPS_FAULT,
889 "Discovery timed out, continuing.\n");
890 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
891 mpssas_discovery_end(sassc);
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: answers path inquiries and transfer-settings queries locally,
 * fakes success for operations the hardware handles itself, and forwards
 * SCSI I/O, SMP I/O and device resets to their dedicated handlers.
 * Called with the per-controller mutex held (asserted below).
 */
899 mpssas_action(struct cam_sim *sim, union ccb *ccb)
901 struct mpssas_softc *sassc;
903 sassc = cam_sim_softc(sim);
905 mps_dprint(sassc->sc, MPS_TRACE, "%s func 0x%x\n", __func__,
906 ccb->ccb_h.func_code);
907 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
909 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: describe the HBA's capabilities to CAM. */
912 struct ccb_pathinq *cpi = &ccb->cpi;
914 cpi->version_num = 1;
915 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
916 cpi->target_sprt = 0;
917 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
918 cpi->hba_eng_cnt = 0;
919 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
921 cpi->initiator_id = 255;
922 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
923 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
924 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
925 cpi->unit_number = cam_sim_unit(sim);
926 cpi->bus_id = cam_sim_bus(sim);
/* SAS 1.5Gb/s expressed in KB/s, as CAM expects. */
927 cpi->base_transfer_speed = 150000;
928 cpi->transport = XPORT_SAS;
929 cpi->transport_version = 0;
930 cpi->protocol = PROTO_SCSI;
931 cpi->protocol_version = SCSI_REV_SPC;
932 #if __FreeBSD_version >= 800001
934 * XXX KDM where does this number come from?
936 cpi->maxio = 256 * 1024;
938 cpi->ccb_h.status = CAM_REQ_CMP;
941 case XPT_GET_TRAN_SETTINGS:
943 struct ccb_trans_settings *cts;
944 struct ccb_trans_settings_sas *sas;
945 struct ccb_trans_settings_scsi *scsi;
946 struct mpssas_target *targ;
949 sas = &cts->xport_specific.sas;
950 scsi = &cts->proto_specific.scsi;
/* A zero handle means no device at this target id. */
952 targ = &sassc->targets[cts->ccb_h.target_id];
953 if (targ->handle == 0x0) {
954 cts->ccb_h.status = CAM_SEL_TIMEOUT;
958 cts->protocol_version = SCSI_REV_SPC2;
959 cts->transport = XPORT_SAS;
960 cts->transport_version = 0;
/* Report the negotiated link rate in KB/s. */
962 sas->valid = CTS_SAS_VALID_SPEED;
963 switch (targ->linkrate) {
965 sas->bitrate = 150000;
968 sas->bitrate = 300000;
971 sas->bitrate = 600000;
977 cts->protocol = PROTO_SCSI;
978 scsi->valid = CTS_SCSI_VALID_TQ;
979 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
981 cts->ccb_h.status = CAM_REQ_CMP;
984 case XPT_CALC_GEOMETRY:
985 cam_calc_geometry(&ccb->ccg, /*extended*/1);
986 ccb->ccb_h.status = CAM_REQ_CMP;
989 mps_printf(sassc->sc, "mpssas_action XPT_RESET_DEV\n");
990 mpssas_action_resetdev(sassc, ccb);
/* Bus resets etc. are handled by firmware; report success. */
995 mps_printf(sassc->sc, "mpssas_action faking success for "
997 ccb->ccb_h.status = CAM_REQ_CMP;
1000 mpssas_action_scsiio(sassc, ccb);
1002 #if __FreeBSD_version >= 900026
1004 mpssas_action_smpio(sassc, ccb);
/* Anything unrecognized: not supported by this SIM. */
1008 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Notify CAM of a reset event ('ac_code', e.g. AC_BUS_RESET or
 * AC_SENT_BDR) on the given target/LUN: build a temporary path, post the
 * async event, and free the path.  Wildcards are allowed for both ids.
 */
1016 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1017 target_id_t target_id, lun_id_t lun_id)
1019 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1020 struct cam_path *path;
1022 mps_printf(sc, "%s code %x target %d lun %d\n", __func__,
1023 ac_code, target_id, lun_id);
1025 if (xpt_create_path(&path, NULL,
1026 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1027 mps_printf(sc, "unable to create path for reset "
1032 xpt_async(ac_code, path, NULL);
1033 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (index 0 is skipped —
 * presumably reserved) and force-complete anything outstanding with a
 * NULL reply, via its completion callback for async commands or a wakeup
 * for polled/sleeping submitters.  Logs any slot that is neither
 * completed nor free, which should be impossible.
 */
1037 mpssas_complete_all_commands(struct mps_softc *sc)
1039 struct mps_command *cm;
1043 mps_printf(sc, "%s\n", __func__);
1044 mtx_assert(&sc->mps_mtx, MA_OWNED);
1046 /* complete all commands with a NULL reply */
1047 for (i = 1; i < sc->num_reqs; i++) {
1048 cm = &sc->commands[i];
1049 cm->cm_reply = NULL;
/* Polled callers spin on COMPLETE; set it so they exit their loop. */
1052 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1053 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1055 if (cm->cm_complete != NULL) {
1056 mpssas_log_command(cm,
1057 "completing cm %p state %x ccb %p for diag reset\n",
1058 cm, cm->cm_state, cm->cm_ccb);
1060 cm->cm_complete(sc, cm);
1064 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1065 mpssas_log_command(cm,
1066 "waking up cm %p state %x ccb %p for diag reset\n",
1067 cm, cm->cm_state, cm->cm_ccb);
/* Neither path handled it and it isn't free: log the anomaly. */
1072 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1073 /* this should never happen, but if it does, log */
1074 mpssas_log_command(cm,
1075 "cm %p state %x flags 0x%x ccb %p during diag "
1076 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup mode (freeze the
 * SIMQ until port enable re-runs discovery), announce a bus reset to
 * CAM, force-complete all outstanding commands, reset the startup
 * refcount, and invalidate every target's firmware handles since they
 * may change after the reset.
 */
1083 mpssas_handle_reinit(struct mps_softc *sc)
1087 /* Go back into startup mode and freeze the simq, so that CAM
1088 * doesn't send any commands until after we've rediscovered all
1089 * targets and found the proper device handles for them.
1091 * After the reset, portenable will trigger discovery, and after all
1092 * discovery-related activities have finished, the simq will be
1095 mps_printf(sc, "%s startup\n", __func__);
1096 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1097 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1098 xpt_freeze_simq(sc->sassc->sim, 1);
1100 /* notify CAM of a bus reset */
1101 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1104 /* complete and cleanup after all outstanding commands */
1105 mpssas_complete_all_commands(sc);
1107 mps_printf(sc, "%s startup %u tm %u after command completion\n",
1108 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1111 * The simq was explicitly frozen above, so set the refcount to 0.
1112 * The simq will be explicitly released after port enable completes.
1114 sc->sassc->startup_refcount = 0;
1116 /* zero all the target handles, since they may change after the
1117 * reset, and we have to rediscover all the targets and use the new
1120 for (i = 0; i < sc->facts->MaxTargets; i++) {
1121 if (sc->sassc->targets[i].outstanding != 0)
1122 mps_printf(sc, "target %u outstanding %u\n",
1123 i, sc->sassc->targets[i].outstanding);
1124 sc->sassc->targets[i].handle = 0x0;
1125 sc->sassc->targets[i].exp_dev_handle = 0x0;
1126 sc->sassc->targets[i].outstanding = 0;
/* Note: flags are overwritten, not OR'd — prior flags are discarded. */
1127 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out.
 * Logs the event; the recovery action (if any) is outside the visible
 * portion of this chunk.
 */
1131 mpssas_tm_timeout(void *data)
1133 struct mps_command *tm = data;
1134 struct mps_softc *sc = tm->cm_sc;
1136 mtx_assert(&sc->mps_mtx, MA_OWNED);
1138 mpssas_log_command(tm, "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LUN-reset TM.  Stops the TM timeout callout,
 * validates the reply, then counts commands still outstanding on the same
 * LUN: if none remain, recovery for this LUN is done (announce AC_SENT_BDR
 * and move on to the next timed-out command, if any); if some remain, the
 * LUN reset effectively failed and recovery escalates to a target reset
 * by reusing this TM via mpssas_send_reset().
 */
1143 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1145 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1146 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1147 unsigned int cm_count = 0;
1148 struct mps_command *cm;
1149 struct mpssas_target *targ;
1151 callout_stop(&tm->cm_callout);
1153 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1154 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1158 * Currently there should be no way we can hit this case. It only
1159 * happens when we have a failure to allocate chain frames, and
1160 * task management commands don't have S/G lists.
1162 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1163 mps_printf(sc, "%s: cm_flags = %#x for LUN reset! "
1164 "This should not happen!\n", __func__, tm->cm_flags);
1165 mpssas_free_tm(sc, tm);
/* NULL reply: if a diag reset is in progress just drop the INRESET flag. */
1169 if (reply == NULL) {
1170 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1171 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1172 /* this completion was due to a reset, just cleanup */
1173 targ->flags &= ~MPSSAS_TARGET_INRESET;
1175 mpssas_free_tm(sc, tm);
1178 /* we should have gotten a reply. */
1184 mpssas_log_command(tm,
1185 "logical unit reset status 0x%x code 0x%x count %u\n",
1186 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1187 le32toh(reply->TerminationCount));
1189 /* See if there are any outstanding commands for this LUN.
1190 * This could be made more efficient by using a per-LU data
1191 * structure of some sort.
1193 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1194 if (cm->cm_lun == tm->cm_lun)
1198 if (cm_count == 0) {
1199 mpssas_log_command(tm,
1200 "logical unit %u finished recovery after reset\n",
1203 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1206 /* we've finished recovery for this logical unit. check and
1207 * see if some other logical unit has a timedout command
1208 * that needs to be processed.
1210 cm = TAILQ_FIRST(&targ->timedout_commands);
1212 mpssas_send_abort(sc, tm, cm);
1216 mpssas_free_tm(sc, tm);
1220 /* if we still have commands for this LUN, the reset
1221 * effectively failed, regardless of the status reported.
1222 * Escalate to a target reset.
1224 mpssas_log_command(tm,
1225 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1227 mpssas_send_reset(sc, tm,
1228 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command.  Clears
 * MPSSAS_TARGET_INRESET; if the target has no outstanding commands left,
 * recovery is complete (AC_SENT_BDR is announced to CAM and the TM is
 * freed).  If commands remain outstanding the reset effectively failed
 * and recovery escalates further (escalation path is on elided lines).
 */
1233 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1235 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1236 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1237 struct mpssas_target *targ;
/* the TM completed, so its timeout callout must not fire */
1239 callout_stop(&tm->cm_callout);
1241 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1242 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1246 * Currently there should be no way we can hit this case. It only
1247 * happens when we have a failure to allocate chain frames, and
1248 * task management commands don't have S/G lists.
1250 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1251 mps_printf(sc, "%s: cm_flags = %#x for target reset! "
1252 "This should not happen!\n", __func__, tm->cm_flags);
1253 mpssas_free_tm(sc, tm);
1257 if (reply == NULL) {
1258 mpssas_log_command(tm, "NULL reset reply for tm %p\n", tm);
1259 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1260 /* this completion was due to a reset, just cleanup */
/*
 * NOTE(review): as in the LUN-reset handler, no visible assignment to
 * 'targ' precedes this dereference in this chunk -- confirm it is set
 * (presumably to tm->cm_targ) on an elided line.
 */
1261 targ->flags &= ~MPSSAS_TARGET_INRESET;
1263 mpssas_free_tm(sc, tm);
1266 /* we should have gotten a reply. */
1272 mpssas_log_command(tm,
1273 "target reset status 0x%x code 0x%x count %u\n",
1274 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1275 le32toh(reply->TerminationCount));
1277 targ->flags &= ~MPSSAS_TARGET_INRESET;
1279 if (targ->outstanding == 0) {
1280 /* we've finished recovery for this target and all
1281 * of its logical units.
1283 mpssas_log_command(tm,
1284 "recovery finished after target reset\n");
/* tell CAM a bus-device-reset-equivalent was sent to this target */
1286 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1290 mpssas_free_tm(sc, tm);
1293 /* after a target reset, if this target still has
1294 * outstanding commands, the reset effectively failed,
1295 * regardless of the status reported. escalate.
1297 mpssas_log_command(tm,
1298 "target reset complete for tm %p, but still have %u command(s)\n",
1299 tm, targ->outstanding);
/* TM reset commands get MPS_RESET_TIMEOUT seconds before they time out */
1304 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a SCSI task-management reset (logical-unit or target
 * reset, selected by 'type') for the target attached to TM command 'tm'.
 * Sets the matching completion handler, arms the TM timeout callout, and
 * maps/sends the request on the HighPriority descriptor path.  Bails out
 * early if the target has no firmware device handle.
 */
1307 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1309 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1310 struct mpssas_target *target;
1313 target = tm->cm_targ;
1314 if (target->handle == 0) {
1315 mps_printf(sc, "%s null devhandle for target_id %d\n",
1316 __func__, target->tid);
1320 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1321 req->DevHandle = htole16(target->handle);
1322 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1323 req->TaskType = type;
1325 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1326 /* XXX Need to handle invalid LUNs */
1327 MPS_SET_LUN(req->LUN, tm->cm_lun);
1328 tm->cm_targ->logical_unit_resets++;
1329 mpssas_log_command(tm, "sending logical unit reset\n");
1330 tm->cm_complete = mpssas_logical_unit_reset_complete;
1332 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1333 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1334 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1335 tm->cm_targ->target_resets++;
1336 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1337 mpssas_log_command(tm, "sending target reset\n");
1338 tm->cm_complete = mpssas_target_reset_complete;
/* any other task type is a caller bug; log it (return path elided) */
1341 mps_printf(sc, "unexpected reset type 0x%x\n", type);
1346 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1347 tm->cm_complete_data = (void *)tm;
/* arm the TM timeout before handing the command to the hardware */
1349 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1350 mpssas_tm_timeout, tm);
/* NOTE(review): 'err' is declared on an elided line -- confirm */
1352 err = mps_map_command(sc, tm);
1354 mpssas_log_command(tm,
1355 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management command.  Looks at
 * the head of the target's timed-out command list to decide what's next:
 * list empty -> recovery finished; head is a different command -> abort
 * succeeded, continue aborting the next one; head is still the command we
 * tried to abort -> the abort failed, escalate to a logical unit reset.
 */
1363 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1365 struct mps_command *cm;
1366 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1367 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1368 struct mpssas_target *targ;
/* the TM completed, so its timeout callout must not fire */
1370 callout_stop(&tm->cm_callout);
1372 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1373 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1377 * Currently there should be no way we can hit this case. It only
1378 * happens when we have a failure to allocate chain frames, and
1379 * task management commands don't have S/G lists.
1381 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1382 mpssas_log_command(tm,
1383 "cm_flags = %#x for abort %p TaskMID %u!\n",
1384 tm->cm_flags, tm, le16toh(req->TaskMID));
1385 mpssas_free_tm(sc, tm);
1389 if (reply == NULL) {
1390 mpssas_log_command(tm,
1391 "NULL abort reply for tm %p TaskMID %u\n",
1392 tm, le16toh(req->TaskMID));
1393 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1394 /* this completion was due to a reset, just cleanup */
1396 mpssas_free_tm(sc, tm);
1399 /* we should have gotten a reply. */
1405 mpssas_log_command(tm,
1406 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1407 le16toh(req->TaskMID),
1408 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1409 le32toh(reply->TerminationCount));
/* peek at the next timed-out command for this target, if any */
1411 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1413 /* if there are no more timedout commands, we're done with
1414 * error recovery for this target.
1416 mpssas_log_command(tm,
1417 "finished recovery after aborting TaskMID %u\n",
1418 le16toh(req->TaskMID));
1421 mpssas_free_tm(sc, tm);
1423 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1424 /* abort success, but we have more timedout commands to abort */
1425 mpssas_log_command(tm,
1426 "continuing recovery after aborting TaskMID %u\n",
1427 le16toh(req->TaskMID));
/* reuse the same TM command for the next abort */
1429 mpssas_send_abort(sc, tm, cm);
1432 /* we didn't get a command completion, so the abort
1433 * failed as far as we're concerned. escalate.
1435 mpssas_log_command(tm,
1436 "abort failed for TaskMID %u tm %p\n",
1437 le16toh(req->TaskMID), tm);
1439 mpssas_send_reset(sc, tm,
1440 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* aborts get a short MPS_ABORT_TIMEOUT-second window before escalating */
1444 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT TASK task-management request for timed-out
 * command 'cm', using TM command 'tm'.  TaskMID identifies the victim by
 * its SMID.  Arms the abort timeout callout and maps/sends the request on
 * the HighPriority descriptor path.
 */
1447 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1449 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1450 struct mpssas_target *targ;
/*
 * NOTE(review): the assignment of 'targ' (presumably cm->cm_targ) is on
 * an elided line above this check -- confirm against the full source.
 */
1454 if (targ->handle == 0) {
1455 mps_printf(sc, "%s null devhandle for target_id %d\n",
1456 __func__, cm->cm_ccb->ccb_h.target_id);
1460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1461 req->DevHandle = htole16(targ->handle);
1462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1465 /* XXX Need to handle invalid LUNs */
1466 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* identify the command to abort by its system message ID */
1468 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1471 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1472 tm->cm_complete = mpssas_abort_complete;
1473 tm->cm_complete_data = (void *)tm;
1474 tm->cm_targ = cm->cm_targ;
1475 tm->cm_lun = cm->cm_lun;
/* arm the abort timeout before handing the command to the hardware */
1477 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1478 mpssas_tm_timeout, tm);
/* NOTE(review): 'err' is declared on an elided line -- confirm */
1482 err = mps_map_command(sc, tm);
1484 mpssas_log_command(tm,
1485 "error %d sending abort for cm %p SMID %u\n",
/*
 * NOTE(review): TaskMID is logged here without le16toh(), unlike the
 * sibling log sites in mpssas_abort_complete -- wrong value on
 * big-endian hosts; candidate fix.
 */
1486 err, cm, req->TaskMID);
/*
 * Callout handler for a SCSI I/O command that has exceeded its CCB
 * timeout.  First re-runs the interrupt handler in case the completion is
 * merely pending; otherwise marks the command TIMEDOUT, queues it on the
 * target's timedout_commands list, and starts (or joins) task-management
 * recovery for the target.
 */
1492 mpssas_scsiio_timeout(void *data)
1494 struct mps_softc *sc;
1495 struct mps_command *cm;
1496 struct mpssas_target *targ;
1498 cm = (struct mps_command *)data;
/* NOTE(review): 'sc' is assigned on an elided line (likely cm->cm_sc) */
1501 mtx_assert(&sc->mps_mtx, MA_OWNED);
1503 mps_printf(sc, "%s checking sc %p cm %p\n", __func__, sc, cm);
1506 * Run the interrupt handler to make sure it's not pending. This
1507 * isn't perfect because the command could have already completed
1508 * and been re-used, though this is unlikely.
1510 mps_intr_locked(sc);
1511 if (cm->cm_state == MPS_CM_STATE_FREE) {
/* the interrupt pass completed it; nothing more to do */
1512 mps_printf(sc, "SCSI command %p sc %p almost timed out\n", cm, sc);
1516 if (cm->cm_ccb == NULL) {
1517 mps_printf(sc, "command timeout with NULL ccb\n");
1521 mpssas_log_command(cm, "command timeout cm %p ccb %p\n",
1527 /* XXX first, check the firmware state, to see if it's still
1528 * operational. if not, do a diag reset.
1531 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1532 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
/*
 * NOTE(review): 'targ' is assigned on an elided line (likely
 * cm->cm_targ) before this use -- confirm.
 */
1533 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1535 if (targ->tm != NULL) {
1536 /* target already in recovery, just queue up another
1537 * timedout command to be processed later.
1539 mps_printf(sc, "queued timedout cm %p for processing by tm %p\n",
1542 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1543 mps_printf(sc, "timedout cm %p allocated tm %p\n",
1546 /* start recovery by aborting the first timedout command */
1547 mpssas_send_abort(sc, targ->tm, cm);
1550 /* XXX queue this target up for recovery once a TM becomes
1551 * available. The firmware only has a limited number of
1552 * HighPriority credits for the high priority requests used
1553 * for task management, and we ran out.
1555 * Isilon: don't worry about this for now, since we have
1556 * more credits than disks in an enclosure, and limit
1557 * ourselves to one TM per target for recovery.
1559 mps_printf(sc, "timedout cm %p failed to allocate a tm\n",
/*
 * CAM action handler for XPT_SCSI_IO CCBs.  Validates the target, builds
 * an MPI2 SCSI_IO request from the CCB (direction, tags, LUN, CDB, and
 * optional EEDP/T10-PI protection fields), arms the per-command timeout,
 * and maps/sends the command.  On resource shortage the SIM queue is
 * frozen and the CCB is requeued.
 */
1566 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1568 MPI2_SCSI_IO_REQUEST *req;
1569 struct ccb_scsiio *csio;
1570 struct mps_softc *sc;
1571 struct mpssas_target *targ;
1572 struct mpssas_lun *lun;
1573 struct mps_command *cm;
1574 uint8_t i, lba_byte, *ref_tag_addr;
1575 uint16_t eedp_flags;
1576 uint32_t mpi_control;
/* NOTE(review): 'sc' and 'csio' are assigned on elided lines above */
1579 mtx_assert(&sc->mps_mtx, MA_OWNED);
1582 targ = &sassc->targets[csio->ccb_h.target_id];
1583 mps_dprint(sc, MPS_TRACE, "%s ccb %p target flag %x\n", __func__, ccb, targ->flags);
1584 if (targ->handle == 0x0) {
/* no firmware device handle: device is gone, fail like a selection timeout */
1585 mps_dprint(sc, MPS_TRACE, "%s NULL handle for target %u\n",
1586 __func__, csio->ccb_h.target_id);
1587 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1591 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
/* raw SCSI I/O to RAID member disks is not allowed; go through the volume */
1592 mps_dprint(sc, MPS_TRACE, "%s Raid component no SCSI IO supported %u\n",
1593 __func__, csio->ccb_h.target_id);
1594 csio->ccb_h.status = CAM_TID_INVALID;
1599 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1600 * that the volume has timed out. We want volumes to be enumerated
1601 * until they are deleted/removed, not just failed.
1603 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1604 if (targ->devinfo == 0)
1605 csio->ccb_h.status = CAM_REQ_CMP;
1607 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1612 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1613 mps_dprint(sc, MPS_TRACE, "%s shutting down\n", __func__);
1614 csio->ccb_h.status = CAM_TID_INVALID;
1619 cm = mps_alloc_command(sc);
/* out of driver command slots: freeze the SIM queue and requeue the CCB */
1621 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1622 xpt_freeze_simq(sassc->sim, 1);
1623 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1625 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1626 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1631 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1632 bzero(req, sizeof(*req));
1633 req->DevHandle = htole16(targ->handle);
1634 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1636 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1637 req->SenseBufferLength = MPS_SENSE_LEN;
1639 req->ChainOffset = 0;
1640 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1645 req->DataLength = htole32(csio->dxfer_len);
1646 req->BidirectionalDataLength = 0;
1647 req->IoFlags = htole16(csio->cdb_len);
1650 /* Note: BiDirectional transfers are not supported */
1651 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1653 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1654 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1657 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1658 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1662 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1666 if (csio->cdb_len == 32)
1667 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1669 * It looks like the hardware doesn't require an explicit tag
1670 * number for each transaction. SAM Task Management not supported
1673 switch (csio->tag_action) {
1674 case MSG_HEAD_OF_Q_TAG:
1675 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1677 case MSG_ORDERED_Q_TAG:
1678 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1681 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1683 case CAM_TAG_ACTION_NONE:
1684 case MSG_SIMPLE_Q_TAG:
1686 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* carry the per-target Transport Layer Retries setting into the request */
1689 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1690 req->Control = htole32(mpi_control);
1691 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1692 mps_free_command(sc, cm);
1693 ccb->ccb_h.status = CAM_LUN_INVALID;
1698 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1699 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1701 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1702 req->IoFlags = htole16(csio->cdb_len);
1705 * Check if EEDP is supported and enabled. If it is then check if the
1706 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1707 * is formatted for EEDP support. If all of this is true, set CDB up
1708 * for EEDP transfer.
1710 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1711 if (sc->eedp_enabled && eedp_flags) {
1712 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1713 if (lun->lun_id == csio->ccb_h.target_lun) {
1718 if ((lun != NULL) && (lun->eedp_formatted)) {
1719 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1720 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1721 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1722 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1723 req->EEDPFlags = htole16(eedp_flags);
1726 * If CDB less than 32, fill in Primary Ref Tag with
1727 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1728 * already there. Also, set protection bit. FreeBSD
1729 * currently does not support CDBs bigger than 16, but
1730 * the code doesn't hurt, and will be here for the
1733 if (csio->cdb_len != 32) {
/* LBA field offset differs between 16-byte and 10/12-byte CDBs */
1734 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1735 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1736 PrimaryReferenceTag;
1737 for (i = 0; i < 4; i++) {
1739 req->CDB.CDB32[lba_byte + i];
1742 req->CDB.EEDP32.PrimaryReferenceTag =
1743 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1744 req->CDB.EEDP32.PrimaryApplicationTagMask =
1746 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1750 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1751 req->EEDPFlags = htole16(eedp_flags);
1752 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1758 cm->cm_length = csio->dxfer_len;
1759 if (cm->cm_length != 0) {
1761 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1765 cm->cm_sge = &req->SGL;
1766 cm->cm_sglsize = (32 - 24) * 4;
1767 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1768 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1769 cm->cm_complete = mpssas_scsiio_complete;
1770 cm->cm_complete_data = ccb;
1772 cm->cm_lun = csio->ccb_h.target_lun;
1776 * If HBA is a WD and the command is not for a retry, try to build a
1777 * direct I/O message. If failed, or the command is for a retry, send
1778 * the I/O to the IR volume itself.
1780 if (sc->WD_valid_config) {
1781 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1782 mpssas_direct_drive_io(sassc, cm, ccb);
1784 ccb->ccb_h.status = CAM_REQ_INPROG;
/* arm the per-command timeout (CCB timeout is in milliseconds) */
1788 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1789 mpssas_scsiio_timeout, cm);
1792 targ->outstanding++;
1793 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1795 if ((sc->mps_debug & MPS_TRACE) != 0)
1796 mpssas_log_command(cm, "%s cm %p ccb %p outstanding %u\n",
1797 __func__, cm, ccb, targ->outstanding);
1799 mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management response code into a human-readable
 * string and emit it at MPS_INFO debug level.  Purely diagnostic; has no
 * effect on command handling.  ('desc' is declared on an elided line.)
 */
1804 mps_response_code(struct mps_softc *sc, u8 response_code)
1808 switch (response_code) {
1809 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1810 desc = "task management request completed";
1812 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1813 desc = "invalid frame";
1815 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1816 desc = "task management request not supported";
1818 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1819 desc = "task management request failed";
1821 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1822 desc = "task management request succeeded";
1824 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1825 desc = "invalid lun";
1828 desc = "overlapped tag attempted";
1830 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1831 desc = "task queued, however not sent to target";
1837 mps_dprint(sc, MPS_INFO, "response_code(0x%01x): %s\n",
1838 response_code, desc);
* mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Decode and log the IOCStatus, SCSIStatus, and SCSIState of a failed
 * SCSI_IO reply in human-readable form (MPS_INFO debug level).  Also
 * prints autosense data when valid and decodes any response info bytes.
 * Diagnostic only; does not alter the CCB or command state.
 */
1844 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1845 Mpi2SCSIIOReply_t *mpi_reply)
1849 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1850 MPI2_IOCSTATUS_MASK;
1851 u8 scsi_state = mpi_reply->SCSIState;
1852 u8 scsi_status = mpi_reply->SCSIStatus;
1853 char *desc_ioc_state = NULL;
1854 char *desc_scsi_status = NULL;
/* scsi_state flags are concatenated into the softc scratch string */
1855 char *desc_scsi_state = sc->tmp_string;
1856 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; the action taken is on an elided line */
1858 if (log_info == 0x31170000)
1861 switch (ioc_status) {
1862 case MPI2_IOCSTATUS_SUCCESS:
1863 desc_ioc_state = "success";
1865 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1866 desc_ioc_state = "invalid function";
1868 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1869 desc_ioc_state = "scsi recovered error";
1871 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1872 desc_ioc_state = "scsi invalid dev handle";
1874 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1875 desc_ioc_state = "scsi device not there";
1877 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1878 desc_ioc_state = "scsi data overrun";
1880 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1881 desc_ioc_state = "scsi data underrun";
1883 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1884 desc_ioc_state = "scsi io data error";
1886 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1887 desc_ioc_state = "scsi protocol error";
1889 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1890 desc_ioc_state = "scsi task terminated";
1892 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1893 desc_ioc_state = "scsi residual mismatch";
1895 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1896 desc_ioc_state = "scsi task mgmt failed";
1898 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1899 desc_ioc_state = "scsi ioc terminated";
1901 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1902 desc_ioc_state = "scsi ext terminated";
1904 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1905 desc_ioc_state = "eedp guard error";
1907 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1908 desc_ioc_state = "eedp ref tag error";
1910 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1911 desc_ioc_state = "eedp app tag error";
1914 desc_ioc_state = "unknown";
1918 switch (scsi_status) {
1919 case MPI2_SCSI_STATUS_GOOD:
1920 desc_scsi_status = "good";
1922 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1923 desc_scsi_status = "check condition";
1925 case MPI2_SCSI_STATUS_CONDITION_MET:
1926 desc_scsi_status = "condition met";
1928 case MPI2_SCSI_STATUS_BUSY:
1929 desc_scsi_status = "busy";
1931 case MPI2_SCSI_STATUS_INTERMEDIATE:
1932 desc_scsi_status = "intermediate";
1934 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1935 desc_scsi_status = "intermediate condmet";
1937 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1938 desc_scsi_status = "reservation conflict";
1940 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1941 desc_scsi_status = "command terminated";
1943 case MPI2_SCSI_STATUS_TASK_SET_FULL:
1944 desc_scsi_status = "task set full";
1946 case MPI2_SCSI_STATUS_ACA_ACTIVE:
1947 desc_scsi_status = "aca active";
1949 case MPI2_SCSI_STATUS_TASK_ABORTED:
1950 desc_scsi_status = "task aborted";
1953 desc_scsi_status = "unknown";
/* build the scsi_state description by appending one token per set bit */
1957 desc_scsi_state[0] = '\0';
1959 desc_scsi_state = " ";
1960 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
1961 strcat(desc_scsi_state, "response info ");
1962 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
1963 strcat(desc_scsi_state, "state terminated ");
1964 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
1965 strcat(desc_scsi_state, "no status ");
1966 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
1967 strcat(desc_scsi_state, "autosense failed ");
1968 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
1969 strcat(desc_scsi_state, "autosense valid ");
1971 mps_dprint(sc, MPS_INFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x), \n",
1972 le16toh(mpi_reply->DevHandle),
1973 desc_ioc_state, ioc_status);
1974 /* We can add more detail about underflow data here
1977 mps_dprint(sc, MPS_INFO, "\tscsi_status(%s)(0x%02x), "
1978 "scsi_state(%s)(0x%02x)\n", desc_scsi_status,
1979 scsi_status, desc_scsi_state, scsi_state);
1981 if (sc->mps_debug & MPS_INFO &&
1982 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1983 mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : Start :\n");
1984 scsi_sense_print(csio);
1985 mps_dprint(sc, MPS_INFO, "-> Sense Buffer Data : End :\n");
1988 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
/* NOTE(review): response_info/response_bytes declared on elided lines */
1989 response_info = le32toh(mpi_reply->ResponseInfo);
1990 response_bytes = (u8 *)&response_info;
1991 mps_response_code(sc,response_bytes[0]);
/*
 * Completion handler for SCSI I/O commands.  Stops the timeout callout,
 * syncs/unloads DMA, updates per-target accounting, then translates the
 * MPI2 reply (or its absence) into a CAM CCB status: fast path when there
 * is no reply, direct-drive retry for WD I/O, and otherwise a per-
 * IOCStatus switch including sense-data copy-out and TLR handling.
 * Finishes by unfreezing the SIM queue when possible, freezing the device
 * queue on error, and freeing the command.
 */
1996 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1998 MPI2_SCSI_IO_REPLY *rep;
2000 struct ccb_scsiio *csio;
2001 struct mpssas_softc *sassc;
2002 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2003 u8 *TLR_bits, TLR_on;
2007 mps_dprint(sc, MPS_TRACE,
2008 "%s cm %p SMID %u ccb %p reply %p outstanding %u\n",
2009 __func__, cm, cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2010 cm->cm_targ->outstanding);
/* command completed, so its timeout callout must not fire */
2012 callout_stop(&cm->cm_callout);
2013 mtx_assert(&sc->mps_mtx, MA_OWNED);
2016 ccb = cm->cm_complete_data;
2018 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2020 * XXX KDM if the chain allocation fails, does it matter if we do
2021 * the sync and unload here? It is simpler to do it in every case,
2022 * assuming it doesn't cause problems.
2024 if (cm->cm_data != NULL) {
2025 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2026 dir = BUS_DMASYNC_POSTREAD;
2027 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2028 dir = BUS_DMASYNC_POSTWRITE;
2029 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2030 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* per-target accounting: one fewer command in flight */
2033 cm->cm_targ->completed++;
2034 cm->cm_targ->outstanding--;
2035 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2037 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
/* this command was on the recovery list; take it off and log */
2038 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2039 if (cm->cm_reply != NULL)
2040 mpssas_log_command(cm,
2041 "completed timedout cm %p ccb %p during recovery "
2042 "ioc %x scsi %x state %x xfer %u\n",
2044 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2045 le32toh(rep->TransferCount));
2047 mpssas_log_command(cm,
2048 "completed timedout cm %p ccb %p during recovery\n",
2050 } else if (cm->cm_targ->tm != NULL) {
2051 if (cm->cm_reply != NULL)
2052 mpssas_log_command(cm,
2053 "completed cm %p ccb %p during recovery "
2054 "ioc %x scsi %x state %x xfer %u\n",
2056 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2057 le32toh(rep->TransferCount));
2059 mpssas_log_command(cm,
2060 "completed cm %p ccb %p during recovery\n",
2062 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2063 mpssas_log_command(cm,
2064 "reset completed cm %p ccb %p\n",
2068 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2070 * We ran into an error after we tried to map the command,
2071 * so we're getting a callback without queueing the command
2072 * to the hardware. So we set the status here, and it will
2073 * be retained below. We'll go through the "fast path",
2074 * because there can be no reply when we haven't actually
2075 * gone out to the hardware.
2077 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2080 * Currently the only error included in the mask is
2081 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2082 * chain frames. We need to freeze the queue until we get
2083 * a command that completed without this error, which will
2084 * hopefully have some chain frames attached that we can
2085 * use. If we wanted to get smarter about it, we would
2086 * only unfreeze the queue in this condition when we're
2087 * sure that we're getting some chain frames back. That's
2088 * probably unnecessary.
2090 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2091 xpt_freeze_simq(sassc->sim, 1);
2092 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2093 mps_dprint(sc, MPS_INFO, "Error sending command, "
2094 "freezing SIM queue\n");
2098 /* Take the fast path to completion */
2099 if (cm->cm_reply == NULL) {
2100 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2101 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2102 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2104 ccb->ccb_h.status = CAM_REQ_CMP;
2105 ccb->csio.scsi_status = SCSI_STATUS_OK;
2107 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2108 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2109 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2110 mps_dprint(sc, MPS_INFO,
2111 "Unfreezing SIM queue\n");
2116 * There are two scenarios where the status won't be
2117 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2118 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2120 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2122 * Freeze the dev queue so that commands are
2123 * executed in the correct order with after error
2126 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2127 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2129 mps_free_command(sc, cm);
2134 if (sc->mps_debug & MPS_TRACE)
2135 mpssas_log_command(cm,
2136 "ioc %x scsi %x state %x xfer %u\n",
2137 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2138 le32toh(rep->TransferCount));
2141 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2142 * Volume if an error occurred (normal I/O retry). Use the original
2143 * CCB, but set a flag that this will be a retry so that it's sent to
2144 * the original volume. Free the command but reuse the CCB.
2146 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2147 mps_free_command(sc, cm);
2148 ccb->ccb_h.status = MPS_WD_RETRY;
2149 mpssas_action_scsiio(sassc, ccb);
2153 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2154 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2155 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
/* FALLTHROUGH into the success handling below (case labels adjacent) */
2157 case MPI2_IOCSTATUS_SUCCESS:
2158 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2160 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2161 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2162 mpssas_log_command(cm, "recovered error\n");
2164 /* Completion failed at the transport level. */
2165 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2166 MPI2_SCSI_STATE_TERMINATED)) {
2167 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2171 /* In a modern packetized environment, an autosense failure
2172 * implies that there's not much else that can be done to
2173 * recover the command.
2175 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2176 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2181 * CAM doesn't care about SAS Response Info data, but if this is
2182 * the state check if TLR should be done. If not, clear the
2183 * TLR_bits for the target.
2185 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2186 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2187 MPS_SCSI_RI_INVALID_FRAME)) {
2188 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2189 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2193 * Intentionally override the normal SCSI status reporting
2194 * for these two cases. These are likely to happen in a
2195 * multi-initiator environment, and we want to make sure that
2196 * CAM retries these commands rather than fail them.
2198 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2199 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2200 ccb->ccb_h.status = CAM_REQ_ABORTED;
2204 /* Handle normal status and sense */
2205 csio->scsi_status = rep->SCSIStatus;
2206 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2207 ccb->ccb_h.status = CAM_REQ_CMP;
2209 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2211 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2212 int sense_len, returned_sense_len;
/* clamp firmware sense count to the CCB's sense buffer capacity */
2214 returned_sense_len = min(le32toh(rep->SenseCount),
2215 sizeof(struct scsi_sense_data));
2216 if (returned_sense_len < ccb->csio.sense_len)
2217 ccb->csio.sense_resid = ccb->csio.sense_len -
2220 ccb->csio.sense_resid = 0;
2222 sense_len = min(returned_sense_len,
2223 ccb->csio.sense_len - ccb->csio.sense_resid);
2224 bzero(&ccb->csio.sense_data,
2225 sizeof(ccb->csio.sense_data));
2226 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2227 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2231 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2232 * and it's page code 0 (Supported Page List), and there is
2233 * inquiry data, and this is for a sequential access device, and
2234 * the device is an SSP target, and TLR is supported by the
2235 * controller, turn the TLR_bits value ON if page 0x90 is
2238 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2239 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2240 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2241 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2242 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2243 T_SEQUENTIAL) && (sc->control_TLR) &&
2244 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2245 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2246 vpd_list = (struct scsi_vpd_supported_page_list *)
2248 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2250 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2251 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2252 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2253 csio->cdb_io.cdb_bytes[4];
2254 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
/* VPD page 0x90 advertises TLR support */
2255 if (vpd_list->list[i] == 0x90) {
2262 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2263 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2265 * If devinfo is 0 this will be a volume. In that case don't
2266 * tell CAM that the volume is not there. We want volumes to
2267 * be enumerated until they are deleted/removed, not just
2270 if (cm->cm_targ->devinfo == 0)
2271 ccb->ccb_h.status = CAM_REQ_CMP;
2273 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2275 case MPI2_IOCSTATUS_INVALID_SGL:
2276 mps_print_scsiio_cmd(sc, cm);
2277 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2279 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2281 * This is one of the responses that comes back when an I/O
2282 * has been aborted. If it is because of a timeout that we
2283 * initiated, just set the status to CAM_CMD_TIMEOUT.
2284 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2285 * command is the same (it gets retried, subject to the
2286 * retry counter), the only difference is what gets printed
2289 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2290 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2292 ccb->ccb_h.status = CAM_REQ_ABORTED;
2294 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2295 /* resid is ignored for this condition */
2297 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2299 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2300 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2302 * Since these are generally external (i.e. hopefully
2303 * transient transport-related) errors, retry these without
2304 * decrementing the retry count.
2306 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2307 mpssas_log_command(cm,
2308 "terminated ioc %x scsi %x state %x xfer %u\n",
2309 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2310 le32toh(rep->TransferCount));
2312 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2313 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2314 case MPI2_IOCSTATUS_INVALID_VPID:
2315 case MPI2_IOCSTATUS_INVALID_FIELD:
2316 case MPI2_IOCSTATUS_INVALID_STATE:
2317 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2318 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2319 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2320 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2321 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2323 mpssas_log_command(cm,
2324 "completed ioc %x scsi %x state %x xfer %u\n",
2325 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2326 le32toh(rep->TransferCount));
2327 csio->resid = cm->cm_length;
2328 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* dump a human-readable decode of the failed reply at MPS_INFO level */
2332 mps_sc_failed_io_info(sc,csio,rep);
2334 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2335 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2336 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2337 mps_dprint(sc, MPS_INFO, "Command completed, "
2338 "unfreezing SIM queue\n");
2341 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2342 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2343 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2346 mps_free_command(sc, cm);
2350 /* All Request reached here are Endian safe */
2352 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2354 pMpi2SCSIIORequest_t pIO_req;
2355 struct mps_softc *sc = sassc->sc;
2357 uint32_t physLBA, stripe_offset, stripe_unit;
2358 uint32_t io_size, column;
2359 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2362 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2363 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2364 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2365 * bit different than the 10/16 CDBs, handle them separately.
2367 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2368 CDB = pIO_req->CDB.CDB32;
2371 * Handle 6 byte CDBs.
2373 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2374 (CDB[0] == WRITE_6))) {
2376 * Get the transfer size in blocks.
2378 io_size = (cm->cm_length >> sc->DD_block_exponent);
2381 * Get virtual LBA given in the CDB.
2383 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2384 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2387 * Check that LBA range for I/O does not exceed volume's
2390 if ((virtLBA + (uint64_t)io_size - 1) <=
2393 * Check if the I/O crosses a stripe boundary. If not,
2394 * translate the virtual LBA to a physical LBA and set
2395 * the DevHandle for the PhysDisk to be used. If it
2396 * does cross a boundry, do normal I/O. To get the
2397 * right DevHandle to use, get the map number for the
2398 * column, then use that map number to look up the
2399 * DevHandle of the PhysDisk.
2401 stripe_offset = (uint32_t)virtLBA &
2402 (sc->DD_stripe_size - 1);
2403 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2404 physLBA = (uint32_t)virtLBA >>
2405 sc->DD_stripe_exponent;
2406 stripe_unit = physLBA / sc->DD_num_phys_disks;
2407 column = physLBA % sc->DD_num_phys_disks;
2408 pIO_req->DevHandle =
2409 htole16(sc->DD_column_map[column].dev_handle);
2410 /* ???? Is this endian safe*/
2411 cm->cm_desc.SCSIIO.DevHandle =
2414 physLBA = (stripe_unit <<
2415 sc->DD_stripe_exponent) + stripe_offset;
2416 ptrLBA = &pIO_req->CDB.CDB32[1];
2417 physLBA_byte = (uint8_t)(physLBA >> 16);
2418 *ptrLBA = physLBA_byte;
2419 ptrLBA = &pIO_req->CDB.CDB32[2];
2420 physLBA_byte = (uint8_t)(physLBA >> 8);
2421 *ptrLBA = physLBA_byte;
2422 ptrLBA = &pIO_req->CDB.CDB32[3];
2423 physLBA_byte = (uint8_t)physLBA;
2424 *ptrLBA = physLBA_byte;
2427 * Set flag that Direct Drive I/O is
2430 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2437 * Handle 10, 12 or 16 byte CDBs.
2439 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2440 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2441 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2442 (CDB[0] == WRITE_12))) {
2444 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2445 * are 0. If not, this is accessing beyond 2TB so handle it in
2446 * the else section. 10-byte and 12-byte CDB's are OK.
2447 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2448 * ready to accept 12byte CDB for Direct IOs.
2450 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2451 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2452 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2454 * Get the transfer size in blocks.
2456 io_size = (cm->cm_length >> sc->DD_block_exponent);
2459 * Get virtual LBA. Point to correct lower 4 bytes of
2460 * LBA in the CDB depending on command.
2462 lba_idx = ((CDB[0] == READ_12) ||
2463 (CDB[0] == WRITE_12) ||
2464 (CDB[0] == READ_10) ||
2465 (CDB[0] == WRITE_10))? 2 : 6;
2466 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2467 ((uint64_t)CDB[lba_idx + 1] << 16) |
2468 ((uint64_t)CDB[lba_idx + 2] << 8) |
2469 (uint64_t)CDB[lba_idx + 3];
2472 * Check that LBA range for I/O does not exceed volume's
2475 if ((virtLBA + (uint64_t)io_size - 1) <=
2478 * Check if the I/O crosses a stripe boundary.
2479 * If not, translate the virtual LBA to a
2480 * physical LBA and set the DevHandle for the
2481 * PhysDisk to be used. If it does cross a
2482 * boundry, do normal I/O. To get the right
2483 * DevHandle to use, get the map number for the
2484 * column, then use that map number to look up
2485 * the DevHandle of the PhysDisk.
2487 stripe_offset = (uint32_t)virtLBA &
2488 (sc->DD_stripe_size - 1);
2489 if ((stripe_offset + io_size) <=
2490 sc->DD_stripe_size) {
2491 physLBA = (uint32_t)virtLBA >>
2492 sc->DD_stripe_exponent;
2493 stripe_unit = physLBA /
2494 sc->DD_num_phys_disks;
2496 sc->DD_num_phys_disks;
2497 pIO_req->DevHandle =
2498 htole16(sc->DD_column_map[column].
2500 cm->cm_desc.SCSIIO.DevHandle =
2503 physLBA = (stripe_unit <<
2504 sc->DD_stripe_exponent) +
2507 &pIO_req->CDB.CDB32[lba_idx];
2508 physLBA_byte = (uint8_t)(physLBA >> 24);
2509 *ptrLBA = physLBA_byte;
2511 &pIO_req->CDB.CDB32[lba_idx + 1];
2512 physLBA_byte = (uint8_t)(physLBA >> 16);
2513 *ptrLBA = physLBA_byte;
2515 &pIO_req->CDB.CDB32[lba_idx + 2];
2516 physLBA_byte = (uint8_t)(physLBA >> 8);
2517 *ptrLBA = physLBA_byte;
2519 &pIO_req->CDB.CDB32[lba_idx + 3];
2520 physLBA_byte = (uint8_t)physLBA;
2521 *ptrLBA = physLBA_byte;
2524 * Set flag that Direct Drive I/O is
2527 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2532 * 16-byte CDB and the upper 4 bytes of the CDB are not
2533 * 0. Get the transfer size in blocks.
2535 io_size = (cm->cm_length >> sc->DD_block_exponent);
2540 virtLBA = ((uint64_t)CDB[2] << 54) |
2541 ((uint64_t)CDB[3] << 48) |
2542 ((uint64_t)CDB[4] << 40) |
2543 ((uint64_t)CDB[5] << 32) |
2544 ((uint64_t)CDB[6] << 24) |
2545 ((uint64_t)CDB[7] << 16) |
2546 ((uint64_t)CDB[8] << 8) |
2550 * Check that LBA range for I/O does not exceed volume's
2553 if ((virtLBA + (uint64_t)io_size - 1) <=
2556 * Check if the I/O crosses a stripe boundary.
2557 * If not, translate the virtual LBA to a
2558 * physical LBA and set the DevHandle for the
2559 * PhysDisk to be used. If it does cross a
2560 * boundry, do normal I/O. To get the right
2561 * DevHandle to use, get the map number for the
2562 * column, then use that map number to look up
2563 * the DevHandle of the PhysDisk.
2565 stripe_offset = (uint32_t)virtLBA &
2566 (sc->DD_stripe_size - 1);
2567 if ((stripe_offset + io_size) <=
2568 sc->DD_stripe_size) {
2569 physLBA = (uint32_t)(virtLBA >>
2570 sc->DD_stripe_exponent);
2571 stripe_unit = physLBA /
2572 sc->DD_num_phys_disks;
2574 sc->DD_num_phys_disks;
2575 pIO_req->DevHandle =
2576 htole16(sc->DD_column_map[column].
2578 cm->cm_desc.SCSIIO.DevHandle =
2581 physLBA = (stripe_unit <<
2582 sc->DD_stripe_exponent) +
2586 * Set upper 4 bytes of LBA to 0. We
2587 * assume that the phys disks are less
2588 * than 2 TB's in size. Then, set the
2591 pIO_req->CDB.CDB32[2] = 0;
2592 pIO_req->CDB.CDB32[3] = 0;
2593 pIO_req->CDB.CDB32[4] = 0;
2594 pIO_req->CDB.CDB32[5] = 0;
2595 ptrLBA = &pIO_req->CDB.CDB32[6];
2596 physLBA_byte = (uint8_t)(physLBA >> 24);
2597 *ptrLBA = physLBA_byte;
2598 ptrLBA = &pIO_req->CDB.CDB32[7];
2599 physLBA_byte = (uint8_t)(physLBA >> 16);
2600 *ptrLBA = physLBA_byte;
2601 ptrLBA = &pIO_req->CDB.CDB32[8];
2602 physLBA_byte = (uint8_t)(physLBA >> 8);
2603 *ptrLBA = physLBA_byte;
2604 ptrLBA = &pIO_req->CDB.CDB32[9];
2605 physLBA_byte = (uint8_t)physLBA;
2606 *ptrLBA = physLBA_byte;
2609 * Set flag that Direct Drive I/O is
2612 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2619 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command.  Translates the MPI
 * reply (IOCStatus/SASStatus and the SMP function result byte) into a CAM
 * status on the originating CCB, then syncs and unloads the data DMA map
 * and frees the command.
 */
2621 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2623 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2624 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2628 ccb = cm->cm_complete_data;
2631 * Currently there should be no way we can hit this case. It only
2632 * happens when we have a failure to allocate chain frames, and SMP
2633 * commands require two S/G elements only. That should be handled
2634 * in the standard request size.
2636 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2637 mps_printf(sc, "%s: cm_flags = %#x on SMP request!\n",
2638 __func__, cm->cm_flags);
2639 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2643 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2645 mps_dprint(sc, MPS_INFO, "%s: NULL cm_reply!\n", __func__);
2646 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address (stored little-endian in the request). */
2650 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2651 sasaddr = le32toh(req->SASAddress.Low);
2652 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2654 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS ||
2655 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2656 mps_dprint(sc, MPS_INFO, "%s: IOCStatus %04x SASStatus %02x\n",
2657 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2658 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2662 mps_dprint(sc, MPS_INFO, "%s: SMP request to SAS address "
2663 "%#jx completed successfully\n", __func__,
2664 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2666 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2667 ccb->ccb_h.status = CAM_REQ_CMP;
2669 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2673 * We sync in both directions because we had DMAs in the S/G list
2674 * in both directions.
2676 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2677 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2678 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2679 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Only virtual request/response buffers (single segment each) are accepted;
 * the two buffers are described via a 2-element uio so one mps_map_command()
 * call maps both.  Completion is handled in mpssas_smpio_complete().
 */
2684 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2686 struct mps_command *cm;
2687 uint8_t *request, *response;
2688 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2689 struct mps_softc *sc;
2698 * XXX We don't yet support physical addresses here.
2700 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2701 case CAM_DATA_PADDR:
2702 case CAM_DATA_SG_PADDR:
2703 mps_printf(sc, "%s: physical addresses not supported\n",
2705 ccb->ccb_h.status = CAM_REQ_INVALID;
2710 * The chip does not support more than one buffer for the
2711 * request or response.
2713 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2714 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2715 mps_printf(sc, "%s: multiple request or response "
2716 "buffer segments not supported for SMP\n",
2718 ccb->ccb_h.status = CAM_REQ_INVALID;
2724 * The CAM_SCATTER_VALID flag was originally implemented
2725 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2726 * We have two. So, just take that flag to mean that we
2727 * might have S/G lists, and look at the S/G segment count
2728 * to figure out whether that is the case for each individual
2731 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2732 bus_dma_segment_t *req_sg;
2734 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2735 request = (uint8_t *)req_sg[0].ds_addr;
2737 request = ccb->smpio.smp_request;
2739 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2740 bus_dma_segment_t *rsp_sg;
2742 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2743 response = (uint8_t *)rsp_sg[0].ds_addr;
2745 response = ccb->smpio.smp_response;
2747 case CAM_DATA_VADDR:
2748 request = ccb->smpio.smp_request;
2749 response = ccb->smpio.smp_response;
2752 ccb->ccb_h.status = CAM_REQ_INVALID;
2757 cm = mps_alloc_command(sc);
2759 mps_printf(sc, "%s: cannot allocate command\n", __func__);
2760 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2765 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2766 bzero(req, sizeof(*req));
2767 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2769 /* Allow the chip to use any route to this SAS address. */
2770 req->PhysicalPort = 0xff;
2772 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2774 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2776 mps_dprint(sc, MPS_INFO, "%s: sending SMP request to SAS "
2777 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2779 mpi_init_sge(cm, req, &req->SGL);
2782 * Set up a uio to pass into mps_map_command(). This allows us to
2783 * do one map command, and one busdma call in there.
2785 cm->cm_uio.uio_iov = cm->cm_iovec;
2786 cm->cm_uio.uio_iovcnt = 2;
2787 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2790 * The read/write flag isn't used by busdma, but set it just in
2791 * case. This isn't exactly accurate, either, since we're going in
2794 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = SMP request (out), iovec[1] = SMP response (in). */
2796 cm->cm_iovec[0].iov_base = request;
2797 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2798 cm->cm_iovec[1].iov_base = response;
2799 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2801 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2802 cm->cm_iovec[1].iov_len;
2805 * Trigger a warning message in mps_data_cb() for the user if we
2806 * wind up exceeding two S/G segments. The chip expects one
2807 * segment for the request and another for the response.
2809 cm->cm_max_segs = 2;
2811 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2812 cm->cm_complete = mpssas_smpio_complete;
2813 cm->cm_complete_data = ccb;
2816 * Tell the mapping code that we're using a uio, and that this is
2817 * an SMP passthrough request. There is a little special-case
2818 * logic there (in mps_data_cb()) to handle the bidirectional
2821 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2822 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2824 /* The chip data format is little endian. */
2825 req->SASAddress.High = htole32(sasaddr >> 32);
2826 req->SASAddress.Low = htole32(sasaddr);
2829 * XXX Note that we don't have a timeout/abort mechanism here.
2830 * From the manual, it looks like task management requests only
2831 * work for SCSI IO and SATA passthrough requests. We may need to
2832 * have a mechanism to retry requests in the event of a chip reset
2833 * at least. Hopefully the chip will insure that any errors short
2834 * of that are relayed back to the driver.
2836 error = mps_map_command(sc, cm);
2837 if ((error != 0) && (error != EINPROGRESS)) {
2838 mps_printf(sc, "%s: error %d returned from mps_map_command()\n",
2846 mps_free_command(sc, cm);
2847 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * XPT_SMP_IO action handler.  Resolves the SAS address the SMP request
 * should be routed to: either the target's own address (if it embeds an
 * SMP target, i.e. an expander) or the address of its parent expander,
 * then hands off to mpssas_send_smpcmd().
 */
2854 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2856 struct mps_softc *sc;
2857 struct mpssas_target *targ;
2858 uint64_t sasaddr = 0;
2863 * Make sure the target exists.
2865 targ = &sassc->targets[ccb->ccb_h.target_id];
2866 if (targ->handle == 0x0) {
2867 mps_printf(sc, "%s: target %d does not exist!\n", __func__,
2868 ccb->ccb_h.target_id);
2869 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2875 * If this device has an embedded SMP target, we'll talk to it
2877 * figure out what the expander's address is.
2879 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2880 sasaddr = targ->sasaddr;
2883 * If we don't have a SAS address for the expander yet, try
2884 * grabbing it from the page 0x83 information cached in the
2885 * transport layer for this target. LSI expanders report the
2886 * expander SAS address as the port-associated SAS address in
2887 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2890 * XXX KDM disable this for now, but leave it commented out so that
2891 * it is obvious that this is another possible way to get the SAS
2894 * The parent handle method below is a little more reliable, and
2895 * the other benefit is that it works for devices other than SES
2896 * devices. So you can send a SMP request to a da(4) device and it
2897 * will get routed to the expander that device is attached to.
2898 * (Assuming the da(4) device doesn't contain an SMP target...)
2902 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2906 * If we still don't have a SAS address for the expander, look for
2907 * the parent device of this device, which is probably the expander.
2910 #ifdef OLD_MPS_PROBE
2911 struct mpssas_target *parent_target;
2914 if (targ->parent_handle == 0x0) {
2915 mps_printf(sc, "%s: handle %d does not have a valid "
2916 "parent handle!\n", __func__, targ->handle);
2917 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Legacy probe path: look the parent up by handle in the target table. */
2920 #ifdef OLD_MPS_PROBE
2921 parent_target = mpssas_find_target_by_handle(sassc, 0,
2922 targ->parent_handle);
2924 if (parent_target == NULL) {
2925 mps_printf(sc, "%s: handle %d does not have a valid "
2926 "parent target!\n", __func__, targ->handle);
2927 ccb->ccb_h.status = CAM_REQ_INVALID;
2931 if ((parent_target->devinfo &
2932 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2933 mps_printf(sc, "%s: handle %d parent %d does not "
2934 "have an SMP target!\n", __func__,
2935 targ->handle, parent_target->handle);
2936 ccb->ccb_h.status = CAM_REQ_INVALID;
2941 sasaddr = parent_target->sasaddr;
2942 #else /* OLD_MPS_PROBE */
/* Current probe path: parent devinfo/address are cached on the target. */
2943 if ((targ->parent_devinfo &
2944 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2945 mps_printf(sc, "%s: handle %d parent %d does not "
2946 "have an SMP target!\n", __func__,
2947 targ->handle, targ->parent_handle);
2948 ccb->ccb_h.status = CAM_REQ_INVALID;
2952 if (targ->parent_sasaddr == 0x0) {
2953 mps_printf(sc, "%s: handle %d parent handle %d does "
2954 "not have a valid SAS address!\n",
2955 __func__, targ->handle, targ->parent_handle);
2956 ccb->ccb_h.status = CAM_REQ_INVALID;
2960 sasaddr = targ->parent_sasaddr;
2961 #endif /* OLD_MPS_PROBE */
2966 mps_printf(sc, "%s: unable to find SAS address for handle %d\n",
2967 __func__, targ->handle);
2968 ccb->ccb_h.status = CAM_REQ_INVALID;
2971 mpssas_send_smpcmd(sassc, ccb, sasaddr);
2979 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler.  Issues a SCSI task-management Target
 * Reset (with a SAS hard link reset / SATA link reset) to the addressed
 * device; completion is handled in mpssas_resetdev_complete().
 */
2982 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
2984 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2985 struct mps_softc *sc;
2986 struct mps_command *tm;
2987 struct mpssas_target *targ;
2989 mps_dprint(sassc->sc, MPS_TRACE, __func__);
2990 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
2993 tm = mps_alloc_command(sc);
/* NOTE(review): "comand" in the message below is a typo for "command". */
2995 mps_printf(sc, "comand alloc failure in mpssas_action_resetdev\n");
2996 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3001 targ = &sassc->targets[ccb->ccb_h.target_id];
3002 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3003 req->DevHandle = htole16(targ->handle);
3004 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3005 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3007 /* SAS Hard Link Reset / SATA Link Reset */
3008 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3011 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3012 tm->cm_complete = mpssas_resetdev_complete;
3013 tm->cm_complete_data = ccb;
3015 mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Maps the task-management response code to a
 * CAM status, announces the bus-device reset to CAM on success, and frees
 * the task-management command.
 */
3019 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3021 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3024 mps_dprint(sc, MPS_TRACE, __func__);
3025 mtx_assert(&sc->mps_mtx, MA_OWNED);
3027 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3028 ccb = tm->cm_complete_data;
3031 * Currently there should be no way we can hit this case. It only
3032 * happens when we have a failure to allocate chain frames, and
3033 * task management commands don't have S/G lists.
3035 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3036 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3038 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3040 mps_printf(sc, "%s: cm_flags = %#x for reset of handle %#04x! "
3041 "This should not happen!\n", __func__, tm->cm_flags,
3043 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* NOTE(review): raw printf here, unlike the mps_printf/mps_dprint used
 * elsewhere in this file -- presumably leftover debugging; confirm. */
3047 printf("%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3048 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode))3
3050 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3051 ccb->ccb_h.status = CAM_REQ_CMP;
3052 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3056 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3060 mpssas_free_tm(sc, tm);
/*
 * CAM polling entry point, used when interrupts are unavailable (e.g.
 * during a panic/dump).  Turns off MPS_TRACE so per-command debug output
 * does not swamp the console, then services the hardware directly.
 */
3065 mpssas_poll(struct cam_sim *sim)
3067 struct mpssas_softc *sassc;
3069 sassc = cam_sim_softc(sim);
3071 if (sassc->sc->mps_debug & MPS_TRACE) {
3072 /* frequent debug messages during a panic just slow
3073 * everything down too much.
3075 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3076 sassc->sc->mps_debug &= ~MPS_TRACE;
3079 mps_intr_locked(sassc->sc);
/*
 * Completion callback for a rescan CCB queued by mpssas_rescan().  Frees
 * the path and CCB; on pre-1000006 kernels it also kicks off the EEDP
 * probe of existing LUNs once the scan has finished.
 */
3083 mpssas_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
3085 struct mpssas_softc *sassc;
3088 if (done_ccb == NULL)
3091 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3093 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3095 xpt_path_string(done_ccb->ccb_h.path, path_str, sizeof(path_str));
3096 mps_dprint(sassc->sc, MPS_INFO, "Completing rescan for %s\n", path_str);
3098 xpt_free_path(done_ccb->ccb_h.path);
3099 xpt_free_ccb(done_ccb);
3101 #if __FreeBSD_version < 1000006
3103 * Before completing scan, get EEDP stuff for all of the existing
3106 mpssas_check_eedp(sassc);
3111 /* thread to handle bus rescans */
/*
 * Kernel thread body: sleeps on ccb_scanq (waking at least once a second),
 * drains queued rescan CCBs, and exits when MPSSAS_SHUTDOWN is set,
 * clearing MPSSAS_SCANTHREAD and waking anyone waiting on the flags.
 */
3113 mpssas_scanner_thread(void *arg)
3115 struct mpssas_softc *sassc;
3116 struct mps_softc *sc;
3119 sassc = (struct mpssas_softc *)arg;
3122 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3126 /* Sleep for 1 second and check the queue status*/
3127 msleep(&sassc->ccb_scanq, &sc->mps_mtx, PRIBIO,
3128 "mps_scanq", 1 * hz);
3129 if (sassc->flags & MPSSAS_SHUTDOWN) {
3130 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3135 ccb = (union ccb *)TAILQ_FIRST(&sassc->ccb_scanq);
3139 TAILQ_REMOVE(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
/* Re-check for shutdown after each dequeue so we exit promptly. */
3141 if (sassc->flags & MPSSAS_SHUTDOWN) {
3142 mps_dprint(sc, MPS_TRACE, "Scanner shutting down\n");
3148 sassc->flags &= ~MPSSAS_SCANTHREAD;
3149 wakeup(&sassc->flags);
3151 mps_dprint(sc, MPS_TRACE, "Scanner exiting\n");
3156 * This function will send READ_CAP_16 to find out EEDP protection mode.
3157 * It will check inquiry data before sending READ_CAP_16.
3158 * Callback for READ_CAP_16 is "mpssas_read_cap_done".
3159 * This is an internal SCSI command and we need to take care of releasing the devq, if
3160 * CAM_DEV_QFRZN is set. Driver needs to release devq if it has frozen any.
3161 * xpt_release_devq is called from mpssas_read_cap_done.
3163 * All other commands will be handled by periph layer and there it will
3164 * check for "CAM_DEV_QFRZN" and release of devq will be done.
/*
 * Queue a rescan CCB for the scanner thread: stash the softc in
 * ppriv_ptr1, point the callback at mpssas_rescan_done(), and wake the
 * thread sleeping on ccb_scanq.
 */
3167 mpssas_rescan(struct mpssas_softc *sassc, union ccb *ccb)
3171 mps_dprint(sassc->sc, MPS_TRACE, "%s\n", __func__);
3173 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3178 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
3179 mps_dprint(sassc->sc, MPS_INFO, "Queueing rescan for %s\n", path_str);
3181 /* Prepare request */
3182 ccb->ccb_h.ppriv_ptr1 = sassc;
3183 ccb->ccb_h.cbfcnp = mpssas_rescan_done;
3184 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, MPS_PRIORITY_XPT);
3185 TAILQ_INSERT_TAIL(&sassc->ccb_scanq, &ccb->ccb_h, sim_links.tqe);
3186 wakeup(&sassc->ccb_scanq);
3189 #if __FreeBSD_version >= 1000006
/*
 * CAM async event callback (>= 1000006 kernels).  On AC_ADVINFO_CHANGED
 * with buftype CDAI_TYPE_RCAPLONG for a device on our SIM, fetches the
 * cached long read-capacity data via XPT_DEV_ADVINFO and records the
 * LUN's EEDP formatting state and block size, creating the LUN entry if
 * it is not already on the target's list.
 */
3191 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3194 struct mps_softc *sc;
3196 sc = (struct mps_softc *)callback_arg;
3199 case AC_ADVINFO_CHANGED: {
3200 struct mpssas_target *target;
3201 struct mpssas_softc *sassc;
3202 struct scsi_read_capacity_data_long rcap_buf;
3203 struct ccb_dev_advinfo cdai;
3204 struct mpssas_lun *lun;
3209 buftype = (uintptr_t)arg;
3215 * We're only interested in read capacity data changes.
3217 if (buftype != CDAI_TYPE_RCAPLONG)
3221 * We're only interested in devices that are attached to
3224 if (xpt_path_path_id(path) != sassc->sim->path_id)
3228 * We should have a handle for this, but check to make sure.
3230 target = &sassc->targets[xpt_path_target_id(path)];
3231 if (target->handle == 0)
3234 lunid = xpt_path_lun_id(path);
/* Look for an existing entry for this LUN; create one if absent. */
3236 SLIST_FOREACH(lun, &target->luns, lun_link) {
3237 if (lun->lun_id == lunid) {
3243 if (found_lun == 0) {
3244 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3247 mps_dprint(sc, MPS_FAULT, "Unable to alloc "
3248 "LUN for EEDP support.\n");
3251 lun->lun_id = lunid;
3252 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Pull the cached long read-capacity data from the transport layer. */
3255 bzero(&rcap_buf, sizeof(rcap_buf));
3256 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3257 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3258 cdai.ccb_h.flags = CAM_DIR_IN;
3259 cdai.buftype = CDAI_TYPE_RCAPLONG;
3261 cdai.bufsiz = sizeof(rcap_buf);
3262 cdai.buf = (uint8_t *)&rcap_buf;
3263 xpt_action((union ccb *)&cdai);
3264 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3265 cam_release_devq(cdai.ccb_h.path,
3268 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3269 && (rcap_buf.prot & SRC16_PROT_EN)) {
3270 lun->eedp_formatted = TRUE;
3271 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3273 lun->eedp_formatted = FALSE;
3274 lun->eedp_block_size = 0;
3282 #else /* __FreeBSD_version >= 1000006 */
/*
 * Pre-1000006 EEDP probe: walk every target/LUN the SIM knows about and,
 * for LUNs whose inquiry data advertises protection (SPC3_SID_PROTECT),
 * issue an internal READ CAPACITY(16).  mpssas_read_cap_done() consumes
 * the result and records the LUN's EEDP state.
 */
3285 mpssas_check_eedp(struct mpssas_softc *sassc)
3287 struct mps_softc *sc = sassc->sc;
3288 struct ccb_scsiio *csio;
3289 struct scsi_read_capacity_16 *scsi_cmd;
3290 struct scsi_read_capacity_eedp *rcap_buf;
3292 path_id_t pathid = cam_sim_path(sassc->sim);
3293 target_id_t targetid;
3295 struct cam_periph *found_periph;
3296 struct mpssas_target *target;
3297 struct mpssas_lun *lun;
3299 struct ccb_getdev cgd;
3303 * Issue a READ CAPACITY 16 command to each LUN of each target. This
3304 * info is used to determine if the LUN is formatted for EEDP support.
3306 for (targetid = 0; targetid < sc->facts->MaxTargets; targetid++) {
3307 target = &sassc->targets[targetid];
3308 if (target->handle == 0x0) {
3314 ccb = xpt_alloc_ccb_nowait();
3316 mps_dprint(sc, MPS_FAULT, "Unable to alloc CCB "
3317 "for EEDP support.\n");
3321 if (xpt_create_path(&ccb->ccb_h.path, NULL,
3322 pathid, targetid, lunid) != CAM_REQ_CMP) {
3323 mps_dprint(sc, MPS_FAULT, "Unable to create "
3324 "path for EEDP support\n");
3330 * If a periph is returned, the LUN exists. Create an
3331 * entry in the target's LUN list.
3333 if ((found_periph = cam_periph_find(ccb->ccb_h.path,
3336 * If LUN is already in list, don't create a new
3340 SLIST_FOREACH(lun, &target->luns, lun_link) {
3341 if (lun->lun_id == lunid) {
3347 lun = malloc(sizeof(struct mpssas_lun),
3348 M_MPT2, M_NOWAIT | M_ZERO);
3350 mps_dprint(sc, MPS_FAULT,
3351 "Unable to alloc LUN for "
3353 xpt_free_path(ccb->ccb_h.path);
3357 lun->lun_id = lunid;
3358 SLIST_INSERT_HEAD(&target->luns, lun,
3362 /* Before Issuing READ CAPACITY 16,
3363 * check Device type.
3365 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path,
3366 CAM_PRIORITY_NORMAL);
3367 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
3368 xpt_action((union ccb *)&cgd);
3371 * If this flag is set in the inquiry data,
3372 * the device supports protection information,
3373 * and must support the 16 byte read
3374 * capacity command, otherwise continue without
3375 * sending read cap 16
3378 xpt_path_string(ccb->ccb_h.path, path_str,
3381 if ((cgd.inq_data.spc3_flags &
3382 SPC3_SID_PROTECT) == 0) {
3383 xpt_free_path(ccb->ccb_h.path);
3388 mps_dprint(sc, MPS_INFO,
3389 "Sending read cap: path %s"
3390 " handle %d\n", path_str, target->handle );
3393 * Issue a READ CAPACITY 16 command for the LUN.
3394 * The mpssas_read_cap_done function will load
3395 * the read cap info into the LUN struct.
3398 malloc(sizeof(struct scsi_read_capacity_eedp),
3399 M_MPT2, M_NOWAIT| M_ZERO);
3400 if (rcap_buf == NULL) {
3401 mps_dprint(sc, MPS_FAULT, "Unable to alloc read "
3402 "capacity buffer for EEDP support.\n");
3403 xpt_free_path(ccb->ccb_h.path);
3408 csio->ccb_h.func_code = XPT_SCSI_IO;
3409 csio->ccb_h.flags = CAM_DIR_IN;
3410 csio->ccb_h.retry_count = 4;
3411 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3412 csio->ccb_h.timeout = 60000;
3413 csio->data_ptr = (uint8_t *)rcap_buf;
3414 csio->dxfer_len = sizeof(struct
3415 scsi_read_capacity_eedp);
3416 csio->sense_len = MPS_SENSE_LEN;
3417 csio->cdb_len = sizeof(*scsi_cmd);
3418 csio->tag_action = MSG_SIMPLE_Q_TAG;
3420 scsi_cmd = (struct scsi_read_capacity_16 *)
3421 &csio->cdb_io.cdb_bytes;
3422 bzero(scsi_cmd, sizeof(*scsi_cmd));
/* 0x9E = SERVICE ACTION IN(16); with SRC16_SERVICE_ACTION this is
 * READ CAPACITY(16).  Byte 13 is the allocation length. */
3423 scsi_cmd->opcode = 0x9E;
3424 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3425 ((uint8_t *)scsi_cmd)[13] = sizeof(struct
3426 scsi_read_capacity_eedp);
3429 * Set the path, target and lun IDs for the READ
3432 ccb->ccb_h.path_id =
3433 xpt_path_path_id(ccb->ccb_h.path);
3434 ccb->ccb_h.target_id =
3435 xpt_path_target_id(ccb->ccb_h.path);
3436 ccb->ccb_h.target_lun =
3437 xpt_path_lun_id(ccb->ccb_h.path);
3439 ccb->ccb_h.ppriv_ptr1 = sassc;
3442 xpt_free_path(ccb->ccb_h.path);
3445 } while (found_periph);
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Releases the devq if the command froze it, finds
 * the matching LUN on the target and records whether it is EEDP-formatted
 * (PROT_EN bit) plus its logical block size, then frees the buffer, path
 * and CCB.
 */
3451 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3453 struct mpssas_softc *sassc;
3454 struct mpssas_target *target;
3455 struct mpssas_lun *lun;
3456 struct scsi_read_capacity_eedp *rcap_buf;
3458 if (done_ccb == NULL)
3461 /* Driver needs to release the devq if the SCSI command was
3462 * generated by the driver internally.
3463 * Currently there is a single place where the driver
3464 * issues a SCSI command internally. In future if the driver
3465 * issues more internal SCSI commands, it needs to release the
3466 * devq internally, since those commands will not go back to
3469 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3470 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3471 xpt_release_devq(done_ccb->ccb_h.path,
3472 /*count*/ 1, /*run_queue*/TRUE);
3475 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3478 * Get the LUN ID for the path and look it up in the LUN list for the
3481 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3482 target = &sassc->targets[done_ccb->ccb_h.target_id];
3483 SLIST_FOREACH(lun, &target->luns, lun_link) {
3484 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3488 * Got the LUN in the target's LUN list. Fill it in
3489 * with EEDP info. If the READ CAP 16 command had some
3490 * SCSI error (common if command is not supported), mark
3491 * the lun as not supporting EEDP and set the block size
3494 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3495 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3496 lun->eedp_formatted = FALSE;
3497 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: protection info enabled. */
3501 if (rcap_buf->protect & 0x01) {
3502 lun->eedp_formatted = TRUE;
3503 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3508 // Finished with this CCB and path.
3509 free(rcap_buf, M_MPT2);
3510 xpt_free_path(done_ccb->ccb_h.path);
3511 xpt_free_ccb(done_ccb);
3513 #endif /* __FreeBSD_version >= 1000006 */
/*
 * Kick off SAS topology discovery: bump the startup refcount (keeps the
 * simq frozen), set wait_for_port_enable, and send the port enable
 * request to the IOC.
 */
3516 mpssas_startup(struct mps_softc *sc)
3518 struct mpssas_softc *sassc;
3521 * Send the port enable message and set the wait_for_port_enable flag.
3522 * This flag helps to keep the simq frozen until all discovery events
3526 mpssas_startup_increment(sassc);
3527 sc->wait_for_port_enable = 1;
3528 mpssas_send_portenable(sc);
/*
 * Allocate a command and send MPI2_FUNCTION_PORT_ENABLE to the IOC.
 * Completion is handled in mpssas_portenable_complete().
 */
3533 mpssas_send_portenable(struct mps_softc *sc)
3535 MPI2_PORT_ENABLE_REQUEST *request;
3536 struct mps_command *cm;
3538 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3540 if ((cm = mps_alloc_command(sc)) == NULL)
3542 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3543 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3544 request->MsgFlags = 0;
3546 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3547 cm->cm_complete = mpssas_portenable_complete;
3551 mps_map_command(sc, cm);
3552 mps_dprint(sc, MPS_TRACE,
3553 "mps_send_portenable finished cm %p req %p complete %p\n",
3554 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the port enable request.  Logs failures,
 * disestablishes the config intrhook, fetches WarpDrive config pages
 * before the bus scan, then drops the startup refcount and releases the
 * simq freeze taken before port enable.
 */
3559 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3561 MPI2_PORT_ENABLE_REPLY *reply;
3562 struct mpssas_softc *sassc;
3564 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
3568 * Currently there should be no way we can hit this case. It only
3569 * happens when we have a failure to allocate chain frames, and
3570 * port enable commands don't have S/G lists.
3572 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3573 mps_printf(sc, "%s: cm_flags = %#x for port enable! "
3574 "This should not happen!\n", __func__, cm->cm_flags);
3577 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3579 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n")
/*
 * NOTE(review): the mask is applied before the byte-swap here, unlike the
 * swap-then-mask order used in mpssas_smpio_complete above.  It should
 * presumably be (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK); the
 * current form only works by accident on little-endian hosts -- confirm.
 */
3580 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3581 MPI2_IOCSTATUS_SUCCESS)
3582 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3584 mps_free_command(sc, cm);
3585 if (sc->mps_ich.ich_arg != NULL) {
3586 mps_dprint(sc, MPS_INFO, "disestablish config intrhook\n");
3587 config_intrhook_disestablish(&sc->mps_ich);
3588 sc->mps_ich.ich_arg = NULL;
3592 * Get WarpDrive info after discovery is complete but before the scan
3593 * starts. At this point, all devices are ready to be exposed to the
3594 * OS. If devices should be hidden instead, take them out of the
3595 * 'targets' array before the scan. The devinfo for a disk will have
3596 * some info and a volume's will be 0. Use that to remove disks.
3598 mps_wd_config_pages(sc);
3601 * Done waiting for port enable to complete. Decrement the refcount.
3602 * If refcount is 0, discovery is complete and a rescan of the bus can
3603 * take place. Since the simq was explicitly frozen before port
3604 * enable, it must be explicitly released here to keep the
3605 * freeze/release count in sync.
3607 sc->wait_for_port_enable = 0;
3608 sc->port_enable_complete = 1;
3609 wakeup(&sc->port_enable_complete);
3610 mpssas_startup_decrement(sassc);
3611 xpt_release_simq(sassc->sim, 1);