/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;
static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;
static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
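
/*
 * hw.mpt.enable_sata_wc tunable: -1 (the default) leaves each disk's
 * write cache setting alone; 0 forcibly disables and 1 forcibly enables
 * the write cache on SATA disks behind SAS adapters (see
 * mpt_set_initial_config_sas below).
 */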
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}
static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	TAILQ_INIT(&mpt->request_timeout_list);
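	/*
	 * The SIM queue can be no deeper than the number of request
	 * frames the IOC credits us with (GlobalCredits), capped by
	 * the size of our own request pool.
	 */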
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
	}
	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}
	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		goto attach_done;
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

attach_done:
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}
/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
	switch (mpt->mpt_fcport_page0.CurrentSpeed) {
	case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
		mpt->mpt_fcport_speed = 1;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
		mpt->mpt_fcport_speed = 2;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
		mpt->mpt_fcport_speed = 10;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
		mpt->mpt_fcport_speed = 4;
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		break;
	}
	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}
	mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
	    | mpt->mpt_fcport_page0.WWNN.Low;
	mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
	    | mpt->mpt_fcport_page0.WWPN.Low;
	mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	return (0);
}
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);
	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}
	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}
	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}
static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}
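	/*
	 * ExtPageLength is counted in 32-bit words, so the byte length
	 * of the page is four times that.
	 */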
	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}
	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}
static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}
static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}
/*
 * Read SAS configuration information: the IO unit page plus the phy
 * and device pages for each phy.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->identify.dev_handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}
static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error;
	uint16_t status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
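	/*
	 * The command is expressed as a Register - Host to Device FIS:
	 * 0x27 is the FIS type, 0x80 sets the C (command) bit, 0xef is
	 * the ATA SET FEATURES command, and the features field selects
	 * subcommand 0x02 (enable write cache) or 0x82 (disable it).
	 */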
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}
/*
 * Set initial SAS configuration: force the SATA write cache setting
 * on attached disks if the hw.mpt.enable_sata_wc tunable was set.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			device_printf(mpt->dev,
			    "%sabling SATA WC on phy %d\n",
			    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}
static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (reply_frame != NULL) {
		req->IOCStatus = le16toh(reply_frame->IOCStatus);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}
	return (TRUE);
}
/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}
	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}
	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}
	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}
	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}
/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;
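	/*
	 * SPI Port Page 1 Configuration holds the port's own SCSI ID in
	 * its low byte and, shifted up by
	 * MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID, a bitmask of the
	 * IDs the port responds to; both must reflect our initiator ID.
	 */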
	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}
static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	return (error);
}
static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
	}
	mpt->ready = 1;
}
static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}
/*
 * This routine polls for completions with interrupts disabled; it is
 * used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt, 0);
}
/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}
/*
 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}
	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}
	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}
	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}
	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */
	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
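			/*
			 * The 1078 reportedly mishandles DMA for segments
			 * whose upper 32 address bits equal 9 (i.e. the
			 * 36GB-40GB window); mark those as local addresses
			 * with bit 31 set to steer around the problem.
			 */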
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1U << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}
	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;
	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);
		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1U << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}
	next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:
	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}
	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}
	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}
	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}
	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */
	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}
	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}
	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;
	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);
		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = htole32(dm_segs->ds_addr);
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}
	next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:
	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;
	int error;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
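	/*
	 * Pick the SGL construction callback: platforms with 64-bit bus
	 * addresses need 64-bit SGEs, while narrower platforms can use
	 * the cheaper 32-bit variant.
	 */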
	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}
	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	    csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	/* We assume a single level LUN type */
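	/*
	 * LUNs of 256 and above don't fit the single-byte peripheral
	 * addressing method, so they are encoded with the SAM flat
	 * space addressing method: 01b in the top two bits of byte 0
	 * (hence the 0x40), with the remaining 14 bits holding the LUN.
	 */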
	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
	} else {
		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
	}
	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	} else {
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
	}

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc || mpt->is_sas) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		} else {
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
		}
	}

	if (mpt->is_spi) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}
	mpt_req->Control = htole32(mpt_req->Control);
	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	} else {
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
	}

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = htole32(csio->dxfer_len);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
	/*
	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
	 */
	if (mpt->verbose == MPT_PRT_DEBUG) {
		U32 df;
		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
			mpt_prtc(mpt, "(%s %u byte%s ",
			    (df == MPI_SCSIIO_CONTROL_READ)?
			    "read" : "write", csio->dxfer_len,
			    (csio->dxfer_len == 1)? ")" : "s)");
		}
		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
		    ccb->ccb_h.target_lun, req, req->serno);
	}
	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
	    req, 0);
	if (error == EINPROGRESS) {
		/*
		 * So as to maintain ordering, freeze the controller queue
		 * until our mapping is returned.
		 */
		xpt_freeze_simq(mpt->sim, 1);
		ccbh->status |= CAM_RELEASE_SIMQ;
	}
}
static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
	int sleep_ok)
{
	int error;
	uint16_t status;
	uint8_t response;

	error = mpt_scsi_send_tmf(mpt,
	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    0,	/* XXX How do I get the channel ID? */
	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
	    lun != CAM_LUN_WILDCARD ? lun : 0,
	    0, sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, 5000);

	status = le16toh(mpt->tmf_req->IOCStatus);
	response = mpt->tmf_req->ResponseCode;
	mpt->tmf_req->state = REQ_STATE_FREE;

	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
		    "Resetting controller.\n");
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
		    "Resetting controller.\n", status);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}

	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
		    "Resetting controller.\n", response);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}
	return (0);
}
2299 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2303 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2305 req = mpt_get_request(mpt, FALSE);
2310 memset(fc, 0, sizeof(*fc));
2311 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2312 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2313 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2314 mpt_send_cmd(mpt, req);
2316 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2317 REQ_STATE_DONE, FALSE, 60 * 1000);
2319 mpt_free_request(mpt, req);
2326 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2327 MSG_EVENT_NOTIFY_REPLY *msg)
2329 uint32_t data0, data1;
2331 data0 = le32toh(msg->Data[0]);
2332 data1 = le32toh(msg->Data[1]);
2333 switch(msg->Event & 0xFF) {
2334 case MPI_EVENT_UNIT_ATTENTION:
2335 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2336 (data0 >> 8) & 0xff, data0 & 0xff);
2339 case MPI_EVENT_IOC_BUS_RESET:
2340 /* We generated a bus reset */
2341 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2342 (data0 >> 8) & 0xff);
2343 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2346 case MPI_EVENT_EXT_BUS_RESET:
2347 /* Someone else generated a bus reset */
2348 mpt_prt(mpt, "External Bus Reset Detected\n");
		/*
		 * These replies don't return EventData like the MPI
		 * version of this event does.
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2356 case MPI_EVENT_RESCAN:
2361 * In general this means a device has been added to the loop.
2363 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2364 if (mpt->ready == 0) {
2367 if (mpt->phydisk_sim) {
2368 pathid = cam_sim_path(mpt->phydisk_sim);
2370 pathid = cam_sim_path(mpt->sim);
2373 * Allocate a CCB, create a wildcard path for this bus,
2374 * and schedule a rescan.
2376 ccb = xpt_alloc_ccb_nowait();
2378 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2382 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2383 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2384 mpt_prt(mpt, "unable to create path for rescan\n");
2392 case MPI_EVENT_LINK_STATUS_CHANGE:
2393 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2394 (data1 >> 8) & 0xff,
2395 ((data0 & 0xff) == 0)? "Failed" : "Active");
2398 case MPI_EVENT_LOOP_STATE_CHANGE:
2399 switch ((data0 >> 16) & 0xff) {
2402 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2403 "(Loop Initialization)\n",
2404 (data1 >> 8) & 0xff,
2405 (data0 >> 8) & 0xff,
2407 switch ((data0 >> 8) & 0xff) {
2409 if ((data0 & 0xff) == 0xF7) {
2410 mpt_prt(mpt, "Device needs AL_PA\n");
2412 mpt_prt(mpt, "Device %02x doesn't like "
2418 if ((data0 & 0xff) == 0xF7) {
2419 mpt_prt(mpt, "Device had loop failure "
2420 "at its receiver prior to acquiring"
2423 mpt_prt(mpt, "Device %02x detected loop"
2424 " failure at its receiver\n",
2429 mpt_prt(mpt, "Device %02x requests that device "
2430 "%02x reset itself\n",
2432 (data0 >> 8) & 0xFF);
2437 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2438 "LPE(%02x,%02x) (Loop Port Enable)\n",
2439 (data1 >> 8) & 0xff, /* Port */
2440 (data0 >> 8) & 0xff, /* Character 3 */
2441 (data0 ) & 0xff /* Character 4 */);
2444 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2445 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2446 (data1 >> 8) & 0xff, /* Port */
2447 (data0 >> 8) & 0xff, /* Character 3 */
2448 (data0 ) & 0xff /* Character 4 */);
2451 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2452 "FC event (%02x %02x %02x)\n",
2453 (data1 >> 8) & 0xff, /* Port */
2454 (data0 >> 16) & 0xff, /* Event */
2455 (data0 >> 8) & 0xff, /* Character 3 */
2456 (data0 ) & 0xff /* Character 4 */);
2460 case MPI_EVENT_LOGOUT:
2461 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2462 (data1 >> 8) & 0xff, data0);
2464 case MPI_EVENT_QUEUE_FULL:
2466 struct cam_sim *sim;
2467 struct cam_path *tmppath;
2468 struct ccb_relsim crs;
2469 PTR_EVENT_DATA_QUEUE_FULL pqf;
2472 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2473 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2475 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2477 pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2479 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2480 pqf->TargetID) != 0) {
2481 sim = mpt->phydisk_sim;
2485 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2486 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2487 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2488 mpt_prt(mpt, "unable to create a path to send "
2492 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2493 crs.ccb_h.func_code = XPT_REL_SIMQ;
2494 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2495 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2496 crs.openings = pqf->CurrentDepth - 1;
2497 xpt_action((union ccb *)&crs);
2498 if (crs.ccb_h.status != CAM_REQ_CMP) {
2499 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2501 xpt_free_path(tmppath);
2505 case MPI_EVENT_IR_RESYNC_UPDATE:
2506 mpt_prt(mpt, "IR resync update %d completed\n",
2507 (data0 >> 16) & 0xff);
2509 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2512 struct cam_sim *sim;
2513 struct cam_path *tmppath;
2514 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2516 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2517 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2518 psdsc->TargetID) != 0)
2519 sim = mpt->phydisk_sim;
2522 switch(psdsc->ReasonCode) {
2523 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2524 ccb = xpt_alloc_ccb_nowait();
2527 "unable to alloc CCB for rescan\n");
2530 if (xpt_create_path(&ccb->ccb_h.path, NULL,
2531 cam_sim_path(sim), psdsc->TargetID,
2532 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2534 "unable to create path for rescan\n");
2540 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2541 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2542 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2545 "unable to create path for async event");
2548 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2549 xpt_free_path(tmppath);
2551 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2552 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2553 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2556 mpt_lprt(mpt, MPT_PRT_WARN,
2557 "SAS device status change: Bus: 0x%02x TargetID: "
2558 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2559 psdsc->TargetID, psdsc->ReasonCode);
2564 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2566 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2568 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2569 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2570 mpt_lprt(mpt, MPT_PRT_WARN,
2571 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2572 pde->Port, pde->DiscoveryStatus);
2575 case MPI_EVENT_EVENT_CHANGE:
2576 case MPI_EVENT_INTEGRATED_RAID:
2578 case MPI_EVENT_LOG_ENTRY_ADDED:
2579 case MPI_EVENT_SAS_DISCOVERY:
2580 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2581 case MPI_EVENT_SAS_SES:
2584 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error.  All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
2602 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2603 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2605 MSG_SCSI_IO_REQUEST *scsi_req;
2608 if (req->state == REQ_STATE_FREE) {
2609 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2613 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2616 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2621 mpt_req_untimeout(req, mpt_timeout, ccb);
2622 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2624 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2625 bus_dmasync_op_t op;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}
	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}
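	/*
	 * Note (added): a "context only" completion is just the 32-bit
	 * reply descriptor -- the IOC posts no reply frame at all for
	 * unexceptional I/O, which is why success can be assumed in the
	 * first branch above.
	 */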
2646 if (mpt->outofbeer) {
2647 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2649 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2651 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2652 struct scsi_inquiry_data *iq =
2653 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2654 if (scsi_req->Function ==
2655 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2657 * Fake out the device type so that only the
2658 * pass-thru device will attach.
2660 iq->device &= ~0x1F;
2661 iq->device |= T_NODEVICE;
2664 if (mpt->verbose == MPT_PRT_DEBUG) {
2665 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2668 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2670 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2671 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2673 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2675 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2677 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2678 ("CCB req needed wakeup"));
2680 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2682 mpt_free_request(mpt, req);
2687 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2688 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2690 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2692 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2694 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2696 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2697 /* Record IOC Status and Response Code of TMF for any waiters. */
2698 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2699 req->ResponseCode = tmf_reply->ResponseCode;
2701 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2702 req, req->serno, le16toh(tmf_reply->IOCStatus));
2703 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2704 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2705 req->state |= REQ_STATE_DONE;
2708 mpt->tmf_req->state = REQ_STATE_FREE;
/*
 * XXX: Move to definitions file
 */
#define	ELS	0x22
#define	FC4LS	0x32
#define	ABTS	0x81
#define	BA_ACC	0x84
2732 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2733 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2736 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2737 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2740 * We are going to reuse the ELS request to send this response back.
2743 memset(rsp, 0, sizeof(*rsp));
#ifdef	USE_IMMEDIATE_LINK_DATA
	/*
	 * Apparently the IMMEDIATE stuff doesn't work.
	 */
	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
2751 rsp->RspLength = length;
2752 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2753 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
	/*
	 * Copy over information from the original reply frame to
	 * its correct place in the response.
	 */
	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2762 * And now copy back the temporary area to the original frame.
2764 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2765 rsp = req->req_vbuf;
2767 #ifdef USE_IMMEDIATE_LINK_DATA
2768 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2771 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2772 bus_addr_t paddr = req->req_pbuf;
2773 paddr += MPT_RQSL(mpt);
2776 MPI_SGE_FLAGS_HOST_TO_IOC |
2777 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2778 MPI_SGE_FLAGS_LAST_ELEMENT |
2779 MPI_SGE_FLAGS_END_OF_LIST |
2780 MPI_SGE_FLAGS_END_OF_BUFFER;
2781 fl <<= MPI_SGE_FLAGS_SHIFT;
2783 se->FlagsLength = htole32(fl);
2784 se->Address = htole32((uint32_t) paddr);
2791 mpt_send_cmd(mpt, req);
2795 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2796 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2798 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2799 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2803 U16 status = le16toh(reply_frame->IOCStatus);
2806 int do_refresh = TRUE;
2809 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2810 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2811 req, req->serno, rp->Function));
2812 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2813 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2815 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2818 mpt_lprt(mpt, MPT_PRT_DEBUG,
2819 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2820 req, req->serno, reply_frame, reply_frame->Function);
2822 if (status != MPI_IOCSTATUS_SUCCESS) {
2823 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2824 status, reply_frame->Function);
2825 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2827 * XXX: to get around shutdown issue
	/*
	 * If the function is a link service response, we recycle the
	 * request to post a fresh link service buffer.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
2842 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2843 /* Freddie Uncle Charlie Katie */
2844 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2845 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2846 if (mpt->els_cmd_ptrs[ioindex] == req) {
2850 KASSERT(ioindex < mpt->els_cmds_allocated,
2851 ("can't find my mommie!"));
2853 /* remove from active list as we're going to re-post it */
2854 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2855 req->state &= ~REQ_STATE_QUEUED;
2856 req->state |= REQ_STATE_DONE;
2857 mpt_fc_post_els(mpt, req, ioindex);
2861 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2862 /* remove from active list as we're done */
2863 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2864 req->state &= ~REQ_STATE_QUEUED;
2865 req->state |= REQ_STATE_DONE;
2866 if (req->state & REQ_STATE_TIMEDOUT) {
2867 mpt_lprt(mpt, MPT_PRT_DEBUG,
2868 "Sync Primitive Send Completed After Timeout\n");
2869 mpt_free_request(mpt, req);
2870 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2871 mpt_lprt(mpt, MPT_PRT_DEBUG,
2872 "Async Primitive Send Complete\n");
2873 mpt_free_request(mpt, req);
2875 mpt_lprt(mpt, MPT_PRT_DEBUG,
2876 "Sync Primitive Send Complete- Waking Waiter\n");
2882 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2883 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2884 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2885 rp->MsgLength, rp->MsgFlags);
2889 if (rp->MsgLength <= 5) {
		 * This is just an ACK of an original ELS buffer post.
2893 mpt_lprt(mpt, MPT_PRT_DEBUG,
2894 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2899 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2900 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2902 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2903 cmd = be32toh(elsbuf[0]) >> 24;
2905 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2906 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2910 ioindex = le32toh(rp->TransactionContext);
2911 req = mpt->els_cmd_ptrs[ioindex];
2913 if (rctl == ELS && type == 1) {
2917 * Send back a PRLI ACC
2919 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2920 le32toh(rp->Wwn.PortNameHigh),
2921 le32toh(rp->Wwn.PortNameLow));
2922 elsbuf[0] = htobe32(0x02100014);
2923 elsbuf[1] |= htobe32(0x00000100);
2924 elsbuf[4] = htobe32(0x00000002);
2925 if (mpt->role & MPT_ROLE_TARGET)
2926 elsbuf[4] |= htobe32(0x00000010);
2927 if (mpt->role & MPT_ROLE_INITIATOR)
2928 elsbuf[4] |= htobe32(0x00000020);
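		/*
		 * Sketch (added) of the PRLI accept payload built above,
		 * read against the standard FCP service parameter page
		 * layout (our interpretation, not from the original
		 * source): word 0 holds the ACC code plus page/payload
		 * lengths (0x02100014), and word 4 carries the service
		 * parameter flags, where 0x10 advertises the target
		 * function and 0x20 the initiator function.
		 */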
2929 /* remove from active list as we're done */
2930 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2931 req->state &= ~REQ_STATE_QUEUED;
2932 req->state |= REQ_STATE_DONE;
2933 mpt_fc_els_send_response(mpt, req, rp, 20);
2937 memset(elsbuf, 0, 5 * (sizeof (U32)));
2938 elsbuf[0] = htobe32(0x02100014);
2939 elsbuf[1] = htobe32(0x08000100);
2940 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2941 le32toh(rp->Wwn.PortNameHigh),
2942 le32toh(rp->Wwn.PortNameLow));
2943 /* remove from active list as we're done */
2944 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2945 req->state &= ~REQ_STATE_QUEUED;
2946 req->state |= REQ_STATE_DONE;
2947 mpt_fc_els_send_response(mpt, req, rp, 20);
2951 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2954 } else if (rctl == ABTS && type == 0) {
2955 uint16_t rx_id = le16toh(rp->Rxid);
2956 uint16_t ox_id = le16toh(rp->Oxid);
2957 request_t *tgt_req = NULL;
2960 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2961 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2962 le32toh(rp->Wwn.PortNameLow));
2963 if (rx_id >= mpt->mpt_max_tgtcmds) {
2964 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2965 } else if (mpt->tgt_cmd_ptrs == NULL) {
2966 mpt_prt(mpt, "No TGT CMD PTRS\n");
2968 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2971 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			/*
			 * Check to make sure we have the correct
			 * command.  The reply descriptor in the target
			 * state should contain an IoIndex that matches
			 * the RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
			 */
2984 ct_id = GET_IO_INDEX(tgt->reply_desc);
2986 if (ct_id != rx_id) {
2987 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2988 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2996 "CCB (%p): lun %u flags %x status %x\n",
2997 ccb, ccb->ccb_h.target_lun,
2998 ccb->ccb_h.flags, ccb->ccb_h.status);
3000 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3001 "%x nxfers %x\n", tgt->state,
3002 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3005 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3006 mpt_prt(mpt, "unable to start TargetAbort\n");
3009 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3011 memset(elsbuf, 0, 5 * (sizeof (U32)));
3012 elsbuf[0] = htobe32(0);
3013 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3014 elsbuf[2] = htobe32(0x000ffff);
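			/*
			 * Note (added): the BA_ACC payload above echoes
			 * the exchange (elsbuf[1] = OX_ID << 16 | RX_ID)
			 * and sets the low/high SEQ_CNT halves to
			 * 0x0000/0xffff.
			 */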
		/*
		 * Dork with the reply frame so that the response to it
		 * will be set to BA_ACC.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3020 /* remove from active list as we're done */
3021 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3022 req->state &= ~REQ_STATE_QUEUED;
3023 req->state |= REQ_STATE_DONE;
3024 mpt_fc_els_send_response(mpt, req, rp, 12);
3027 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3029 if (do_refresh == TRUE) {
3030 /* remove from active list as we're done */
3031 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3032 req->state &= ~REQ_STATE_QUEUED;
3033 req->state |= REQ_STATE_DONE;
3034 mpt_fc_post_els(mpt, req, ioindex);
3040 * Clean up all SCSI Initiator personality state in response
3041 * to a controller reset.
3044 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3048 * The pending list is already run down by
3049 * the generic handler. Perform the same
3050 * operation on the timed out request list.
3052 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3053 MPI_IOCSTATUS_INVALID_STATE);
3056 * XXX: We need to repost ELS and Target Command Buffers?
3060 * Inform the XPT that a bus reset has occurred.
3062 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3066 * Parse additional completion information in the reply
3067 * frame for SCSI I/O requests.
3070 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3071 MSG_DEFAULT_REPLY *reply_frame)
3074 MSG_SCSI_IO_REPLY *scsi_io_reply;
3078 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3079 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3080 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3081 ("MPT SCSI I/O Handler called with incorrect reply type"));
3082 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3083 ("MPT SCSI I/O Handler called with continuation reply"));
3085 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3086 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3087 ioc_status &= MPI_IOCSTATUS_MASK;
3088 sstate = scsi_io_reply->SCSIState;
3092 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3094 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3095 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3096 uint32_t sense_returned;
3098 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3100 sense_returned = le32toh(scsi_io_reply->SenseCount);
3101 if (sense_returned < ccb->csio.sense_len)
3102 ccb->csio.sense_resid = ccb->csio.sense_len -
3105 ccb->csio.sense_resid = 0;
3107 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3108 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3109 min(ccb->csio.sense_len, sense_returned));
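		/*
		 * Note (added): the IOC DMAs autosense data into
		 * req->sense_vbuf; copying min(requested, returned)
		 * bytes keeps a short or oversized sense payload from
		 * overrunning csio.sense_data.
		 */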
3112 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3114 * Tag messages rejected, but non-tagged retry
3117 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3121 switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * The Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
3137 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3138 case MPI_IOCSTATUS_SUCCESS:
3139 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3140 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3142 * Status was never returned for this transaction.
3144 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3145 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3146 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3147 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3148 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3149 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3150 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3152 /* XXX Handle SPI-Packet and FCP-2 response info. */
3153 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3155 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3157 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3158 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3160 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3161 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
3174 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3175 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3177 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3178 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3180 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3181 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3183 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3184 ccb->ccb_h.status = CAM_UA_TERMIO;
3186 case MPI_IOCSTATUS_INVALID_STATE:
3188 * The IOC has been reset. Emulate a bus reset.
3191 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3192 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3194 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3195 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3197 * Don't clobber any timeout status that has
3198 * already been set for this transaction. We
3199 * want the SCSI layer to be able to differentiate
3200 * between the command we aborted due to timeout
3201 * and any innocent bystanders.
3203 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3205 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3208 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3209 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3211 case MPI_IOCSTATUS_BUSY:
3212 mpt_set_ccb_status(ccb, CAM_BUSY);
3214 case MPI_IOCSTATUS_INVALID_FUNCTION:
3215 case MPI_IOCSTATUS_INVALID_SGL:
3216 case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/*
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}
3227 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3228 mpt_freeze_ccb(ccb);
3235 mpt_action(struct cam_sim *sim, union ccb *ccb)
3237 struct mpt_softc *mpt;
3238 struct ccb_trans_settings *cts;
3243 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3245 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3246 raid_passthru = (sim == mpt->phydisk_sim);
3247 MPT_LOCK_ASSERT(mpt);
3249 tgt = ccb->ccb_h.target_id;
3250 lun = ccb->ccb_h.target_lun;
3251 if (raid_passthru &&
3252 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3253 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3254 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3255 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3256 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3257 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3262 ccb->ccb_h.ccb_mpt_ptr = mpt;
3264 switch (ccb->ccb_h.func_code) {
3265 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3267 * Do a couple of preliminary checks...
3269 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3270 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3271 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3272 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3276 /* Max supported CDB length is 16 bytes */
3277 /* XXX Unless we implement the new 32byte message type */
3278 if (ccb->csio.cdb_len >
3279 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3280 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3281 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3284 #ifdef MPT_TEST_MULTIPATH
3285 if (mpt->failure_id == ccb->ccb_h.target_id) {
3286 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3287 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3291 ccb->csio.scsi_status = SCSI_STATUS_OK;
3292 mpt_start(sim, ccb);
3296 if (raid_passthru) {
3297 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3298 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3302 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3304 xpt_print(ccb->ccb_h.path, "reset bus\n");
3307 xpt_print(ccb->ccb_h.path, "reset device\n");
3309 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3312 * mpt_bus_reset is always successful in that it
3313 * will fall back to a hard reset should a bus
3314 * reset attempt fail.
3316 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3317 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3322 union ccb *accb = ccb->cab.abort_ccb;
3323 switch (accb->ccb_h.func_code) {
3324 case XPT_ACCEPT_TARGET_IO:
3325 case XPT_IMMEDIATE_NOTIFY:
3326 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3328 case XPT_CONT_TARGET_IO:
3329 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3330 ccb->ccb_h.status = CAM_UA_ABORT;
3333 ccb->ccb_h.status = CAM_UA_ABORT;
3336 ccb->ccb_h.status = CAM_REQ_INVALID;
3342 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3344 #define DP_DISC_ENABLE 0x1
3345 #define DP_DISC_DISABL 0x2
3346 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3348 #define DP_TQING_ENABLE 0x4
3349 #define DP_TQING_DISABL 0x8
3350 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3352 #define DP_WIDE 0x10
3353 #define DP_NARROW 0x20
3354 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3356 #define DP_SYNC 0x40
3358 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3360 struct ccb_trans_settings_scsi *scsi;
3361 struct ccb_trans_settings_spi *spi;
3369 if (mpt->is_fc || mpt->is_sas) {
3370 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3374 scsi = &cts->proto_specific.scsi;
3375 spi = &cts->xport_specific.spi;
		/*
		 * We can be called just to validate transport and proto
		 * versions.
		 */
		if (scsi->valid == 0 && spi->valid == 0) {
3381 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3386 * Skip attempting settings on RAID volume disks.
3387 * Other devices on the bus get the normal treatment.
3389 if (mpt->phydisk_sim && raid_passthru == 0 &&
3390 mpt_is_raid_volume(mpt, tgt) != 0) {
3391 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3392 "no transfer settings for RAID vols\n");
3393 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3397 i = mpt->mpt_port_page2.PortSettings &
3398 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3399 j = mpt->mpt_port_page2.PortFlags &
3400 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3401 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3402 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3403 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3404 "honoring BIOS transfer negotiations\n");
3405 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3413 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3414 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3415 DP_DISC_ENABLE : DP_DISC_DISABL;
3418 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3419 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3420 DP_TQING_ENABLE : DP_TQING_DISABL;
3423 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3424 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3425 DP_WIDE : DP_NARROW;
3428 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3430 offset = spi->sync_offset;
3432 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3433 &mpt->mpt_dev_page1[tgt];
3434 offset = ptr->RequestedParameters;
3435 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3436 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3438 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3440 period = spi->sync_period;
3442 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3443 &mpt->mpt_dev_page1[tgt];
3444 period = ptr->RequestedParameters;
3445 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3446 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3449 if (dval & DP_DISC_ENABLE) {
3450 mpt->mpt_disc_enable |= (1 << tgt);
3451 } else if (dval & DP_DISC_DISABL) {
3452 mpt->mpt_disc_enable &= ~(1 << tgt);
3454 if (dval & DP_TQING_ENABLE) {
3455 mpt->mpt_tag_enable |= (1 << tgt);
3456 } else if (dval & DP_TQING_DISABL) {
3457 mpt->mpt_tag_enable &= ~(1 << tgt);
3459 if (dval & DP_WIDTH) {
3460 mpt_setwidth(mpt, tgt, 1);
3462 if (dval & DP_SYNC) {
3463 mpt_setsync(mpt, tgt, period, offset);
3466 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3469 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3470 "set [%d]: 0x%x period 0x%x offset %d\n",
3471 tgt, dval, period, offset);
3472 if (mpt_update_spi_config(mpt, tgt)) {
3473 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3475 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3479 case XPT_GET_TRAN_SETTINGS:
3481 struct ccb_trans_settings_scsi *scsi;
3483 cts->protocol = PROTO_SCSI;
3485 struct ccb_trans_settings_fc *fc =
3486 &cts->xport_specific.fc;
3487 cts->protocol_version = SCSI_REV_SPC;
3488 cts->transport = XPORT_FC;
3489 cts->transport_version = 0;
3490 if (mpt->mpt_fcport_speed != 0) {
3491 fc->valid = CTS_FC_VALID_SPEED;
3492 fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3494 } else if (mpt->is_sas) {
3495 struct ccb_trans_settings_sas *sas =
3496 &cts->xport_specific.sas;
3497 cts->protocol_version = SCSI_REV_SPC2;
3498 cts->transport = XPORT_SAS;
3499 cts->transport_version = 0;
3500 sas->valid = CTS_SAS_VALID_SPEED;
3501 sas->bitrate = 300000;
3503 cts->protocol_version = SCSI_REV_2;
3504 cts->transport = XPORT_SPI;
3505 cts->transport_version = 2;
3506 if (mpt_get_spi_settings(mpt, cts) != 0) {
3507 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3511 scsi = &cts->proto_specific.scsi;
3512 scsi->valid = CTS_SCSI_VALID_TQ;
3513 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3514 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3517 case XPT_CALC_GEOMETRY:
3519 struct ccb_calc_geometry *ccg;
3522 if (ccg->block_size == 0) {
3523 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3524 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3527 cam_calc_geometry(ccg, /* extended */ 1);
3528 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3531 case XPT_PATH_INQ: /* Path routing inquiry */
3533 struct ccb_pathinq *cpi = &ccb->cpi;
3535 cpi->version_num = 1;
3536 cpi->target_sprt = 0;
3537 cpi->hba_eng_cnt = 0;
3538 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3539 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3541 * FC cards report MAX_DEVICES of 512, but
3542 * the MSG_SCSI_IO_REQUEST target id field
3543 * is only 8 bits. Until we fix the driver
3544 * to support 'channels' for bus overflow,
3547 if (cpi->max_target > 255) {
3548 cpi->max_target = 255;
3552 * VMware ESX reports > 16 devices and then dies when we probe.
3554 if (mpt->is_spi && cpi->max_target > 15) {
3555 cpi->max_target = 15;
3560 cpi->max_lun = MPT_MAX_LUNS;
3561 cpi->initiator_id = mpt->mpt_ini_id;
3562 cpi->bus_id = cam_sim_bus(sim);
3565 * The base speed is the speed of the underlying connection.
3567 cpi->protocol = PROTO_SCSI;
3569 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3570 cpi->base_transfer_speed = 100000;
3571 cpi->hba_inquiry = PI_TAG_ABLE;
3572 cpi->transport = XPORT_FC;
3573 cpi->transport_version = 0;
3574 cpi->protocol_version = SCSI_REV_SPC;
3575 cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3576 cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3577 cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3578 cpi->xport_specific.fc.bitrate =
3579 100000 * mpt->mpt_fcport_speed;
3580 } else if (mpt->is_sas) {
3581 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3582 cpi->base_transfer_speed = 300000;
3583 cpi->hba_inquiry = PI_TAG_ABLE;
3584 cpi->transport = XPORT_SAS;
3585 cpi->transport_version = 0;
3586 cpi->protocol_version = SCSI_REV_SPC2;
3588 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
3589 cpi->base_transfer_speed = 3300;
3590 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3591 cpi->transport = XPORT_SPI;
3592 cpi->transport_version = 2;
3593 cpi->protocol_version = SCSI_REV_2;
		/*
		 * We give our fake RAID passthru bus a width that is
		 * MaxVolumes wide and restrict it to one lun.
		 */
3600 if (raid_passthru) {
3601 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3602 cpi->initiator_id = cpi->max_target + 1;
3606 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3607 cpi->hba_misc |= PIM_NOINITIATOR;
3609 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3611 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3613 cpi->target_sprt = 0;
3615 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3616 strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3617 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3618 cpi->unit_number = cam_sim_unit(sim);
3619 cpi->ccb_h.status = CAM_REQ_CMP;
3622 case XPT_EN_LUN: /* Enable LUN as a target */
3626 if (ccb->cel.enable)
3627 result = mpt_enable_lun(mpt,
3628 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3630 result = mpt_disable_lun(mpt,
3631 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3633 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3635 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3639 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */
3640 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
3641 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3643 tgt_resource_t *trtp;
3644 lun_id_t lun = ccb->ccb_h.target_lun;
3645 ccb->ccb_h.sim_priv.entries[0].field = 0;
3646 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3648 if (lun == CAM_LUN_WILDCARD) {
3649 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3650 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3653 trtp = &mpt->trt_wildcard;
3654 } else if (lun >= MPT_MAX_LUNS) {
3655 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3658 trtp = &mpt->trt[lun];
3660 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3661 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3662 "Put FREE ATIO %p lun %d\n", ccb, lun);
3663 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3665 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
3666 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3667 "Put FREE INOT lun %d\n", lun);
3668 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3671 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3673 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3676 case XPT_CONT_TARGET_IO:
3677 mpt_target_start_io(mpt, ccb);
3681 ccb->ccb_h.status = CAM_REQ_INVALID;
3688 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3690 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3691 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3693 uint32_t dval, pval, oval;
3696 if (IS_CURRENT_SETTINGS(cts) == 0) {
3697 tgt = cts->ccb_h.target_id;
3698 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3699 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3703 tgt = cts->ccb_h.target_id;
3707 * We aren't looking at Port Page 2 BIOS settings here-
3708 * sometimes these have been known to be bogus XXX.
3710 * For user settings, we pick the max from port page 0
3712 * For current settings we read the current settings out from
3713 * device page 0 for that target.
3715 if (IS_CURRENT_SETTINGS(cts)) {
3716 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3719 tmp = mpt->mpt_dev_page0[tgt];
3720 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3721 sizeof(tmp), FALSE, 5000);
3723 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3726 mpt2host_config_page_scsi_device_0(&tmp);
3728 mpt_lprt(mpt, MPT_PRT_DEBUG,
3729 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3730 tmp.NegotiatedParameters, tmp.Information);
3731 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3732 DP_WIDE : DP_NARROW;
3733 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3734 DP_DISC_ENABLE : DP_DISC_DISABL;
3735 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3736 DP_TQING_ENABLE : DP_TQING_DISABL;
3737 oval = tmp.NegotiatedParameters;
3738 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3739 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3740 pval = tmp.NegotiatedParameters;
3741 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3742 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3743 mpt->mpt_dev_page0[tgt] = tmp;
3745 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3746 oval = mpt->mpt_port_page0.Capabilities;
3747 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3748 pval = mpt->mpt_port_page0.Capabilities;
3749 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
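	/*
	 * Note (added): "period" throughout this path is the raw SPI
	 * sync factor read from the config pages, not a time value in
	 * nanoseconds; CAM's spi->sync_period uses the same period
	 * factor encoding, so the value is passed through unconverted.
	 */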
3756 spi->sync_offset = oval;
3757 spi->sync_period = pval;
3758 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3759 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3760 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3761 if (dval & DP_WIDE) {
3762 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3764 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3766 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3767 scsi->valid = CTS_SCSI_VALID_TQ;
3768 if (dval & DP_TQING_ENABLE) {
3769 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3771 spi->valid |= CTS_SPI_VALID_DISC;
3772 if (dval & DP_DISC_ENABLE) {
3773 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3777 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3778 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3779 IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3784 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3786 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3788 ptr = &mpt->mpt_dev_page1[tgt];
3790 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3792 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3797 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3799 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3801 ptr = &mpt->mpt_dev_page1[tgt];
3802 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3803 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3804 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3805 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3806 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
	if (period == 0) {
		return;
	}
	ptr->RequestedParameters |=
	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
	ptr->RequestedParameters |=
	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
	if (period < 0xa) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
	}
	if (period < 0x9) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
	}
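/*
 * Note (added): the two thresholds above gate the fancier SPI options
 * by negotiated speed -- DT clocking for the faster sync factors, and
 * information units (IU) plus quick arbitration/selection (QAS) only
 * for the fastest (Ultra320 class) settings.
 */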
3824 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3826 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3829 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3830 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3831 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3832 tmp = mpt->mpt_dev_page1[tgt];
3833 host2mpt_config_page_scsi_device_1(&tmp);
3834 rv = mpt_write_cur_cfg_page(mpt, tgt,
3835 &tmp.Header, sizeof(tmp), FALSE, 5000);
3837 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3843 /****************************** Timeout Recovery ******************************/
3845 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3849 error = kproc_create(mpt_recovery_thread, mpt,
3850 &mpt->recovery_thread, /*flags*/0,
3851 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3856 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3859 if (mpt->recovery_thread == NULL) {
3862 mpt->shutdwn_recovery = 1;
3865 * Sleep on a slightly different location
3866 * for this interlock just for added safety.
3868 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3872 mpt_recovery_thread(void *arg)
3874 struct mpt_softc *mpt;
3876 mpt = (struct mpt_softc *)arg;
3879 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3880 if (mpt->shutdwn_recovery == 0) {
3881 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3884 if (mpt->shutdwn_recovery != 0) {
3887 mpt_recover_commands(mpt);
3889 mpt->recovery_thread = NULL;
3890 wakeup(&mpt->recovery_thread);
3896 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3897 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3899 MSG_SCSI_TASK_MGMT *tmf_req;
3903 * Wait for any current TMF request to complete.
3904 * We're only allowed to issue one TMF at a time.
3906 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3907 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3909 mpt_reset(mpt, TRUE);
3913 mpt_assign_serno(mpt, mpt->tmf_req);
3914 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3916 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3917 memset(tmf_req, 0, sizeof(*tmf_req));
3918 tmf_req->TargetID = target;
3919 tmf_req->Bus = channel;
3920 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3921 tmf_req->TaskType = type;
3922 tmf_req->MsgFlags = flags;
3923 tmf_req->MsgContext =
3924 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	if (lun > MPT_MAX_LUNS) {
		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		tmf_req->LUN[1] = lun & 0xff;
	} else {
		tmf_req->LUN[1] = lun;
	}
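	/*
	 * Note (added): this is the same single level LUN encoding used
	 * in mpt_start(), except that the boundary test here is
	 * "> MPT_MAX_LUNS" where mpt_start() uses ">=" -- which looks
	 * like a latent off-by-one inherited from the original source.
	 */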
3931 tmf_req->TaskMsgContext = abort_ctx;
3933 mpt_lprt(mpt, MPT_PRT_DEBUG,
3934 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3935 mpt->tmf_req->serno, tmf_req->MsgContext);
3936 if (mpt->verbose > MPT_PRT_DEBUG) {
3937 mpt_print_request(tmf_req);
3940 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3941 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3942 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3943 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3944 if (error != MPT_OK) {
3945 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3946 mpt->tmf_req->state = REQ_STATE_FREE;
3947 mpt_reset(mpt, TRUE);
/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timedout transactions.  The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
 */
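/*
 * Added summary of the flow described above (wording ours):
 *
 *	timeout fires -> request moves to request_timeout_list and the
 *	recovery thread is woken;
 *	mpt_recover_commands() issues one ABORT_TASK TMF at a time,
 *	waiting on mpt->tmf_req between issues;
 *	a TMF that fails or times out escalates to mpt_reset().
 */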
3961 mpt_recover_commands(struct mpt_softc *mpt)
3967 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3969 * No work to do- leave.
3971 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3976 * Flush any commands whose completion coincides with their timeout.
3980 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3982 * The timedout commands have already
3983 * completed. This typically means
3984 * that either the timeout value was on
3985 * the hairy edge of what the device
3986 * requires or - more likely - interrupts
3987 * are not happening.
3989 mpt_prt(mpt, "Timedout requests already complete. "
3990 "Interrupts may not be functioning.\n");
3991 mpt_enable_ints(mpt);
3996 * We have no visibility into the current state of the
3997 * controller, so attempt to abort the commands in the
3998 * order they timed-out. For initiator commands, we
3999 * depend on the reply handler pulling requests off
4002 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4005 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4007 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4008 req, req->serno, hdrp->Function);
4011 mpt_prt(mpt, "null ccb in timed out request. "
4012 "Resetting Controller.\n");
4013 mpt_reset(mpt, TRUE);
4016 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4019 * Check to see if this is not an initiator command and
4020 * deal with it differently if it is.
4022 switch (hdrp->Function) {
4023 case MPI_FUNCTION_SCSI_IO_REQUEST:
4024 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4028 * XXX: FIX ME: need to abort target assists...
4030 mpt_prt(mpt, "just putting it back on the pend q\n");
4031 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4032 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4037 error = mpt_scsi_send_tmf(mpt,
4038 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4039 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4040 htole32(req->index | scsi_io_handler_id), TRUE);
4044 * mpt_scsi_send_tmf hard resets on failure, so no
4045 * need to do so here. Our queue should be emptied
4046 * by the hard reset.
4051 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4052 REQ_STATE_DONE, TRUE, 500);
4054 status = le16toh(mpt->tmf_req->IOCStatus);
4055 response = mpt->tmf_req->ResponseCode;
4056 mpt->tmf_req->state = REQ_STATE_FREE;
			 * If we've errored out, reset the controller.
4062 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4063 "Resetting controller\n");
4064 mpt_reset(mpt, TRUE);
4068 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4069 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4070 "Resetting controller.\n", status);
4071 mpt_reset(mpt, TRUE);
4075 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4076 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4077 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4078 "Resetting controller.\n", response);
4079 mpt_reset(mpt, TRUE);
4082 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4086 /************************ Target Mode Support ****************************/
4088 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4090 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4091 PTR_SGE_TRANSACTION32 tep;
4092 PTR_SGE_SIMPLE32 se;
4096 paddr = req->req_pbuf;
4097 paddr += MPT_RQSL(mpt);
4100 memset(fc, 0, MPT_REQUEST_AREA);
4101 fc->BufferCount = 1;
4102 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4103 fc->MsgContext = htole32(req->index | fc_els_handler_id);
	/*
	 * Okay, set up ELS buffer pointers. ELS buffer pointers
	 * consist of a TE SGL element (with details length of zero)
	 * followed by a SIMPLE SGL element which holds the address
	 * of the buffer itself.
	 */
4112 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4114 tep->ContextSize = 4;
4116 tep->TransactionContext[0] = htole32(ioindex);
4118 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4120 MPI_SGE_FLAGS_HOST_TO_IOC |
4121 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4122 MPI_SGE_FLAGS_LAST_ELEMENT |
4123 MPI_SGE_FLAGS_END_OF_LIST |
4124 MPI_SGE_FLAGS_END_OF_BUFFER;
4125 fl <<= MPI_SGE_FLAGS_SHIFT;
4126 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4127 se->FlagsLength = htole32(fl);
4128 se->Address = htole32((uint32_t) paddr);
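	/*
	 * Illustrative note (added): a SIMPLE32 SGE packs its flags into
	 * the top byte of FlagsLength and the byte count into the low 24
	 * bits, i.e. FlagsLength = (flags << MPI_SGE_FLAGS_SHIFT) | len,
	 * which is what the fl computation above builds; here len is the
	 * ELS buffer size, MPT_NRFM(mpt) - MPT_RQSL(mpt).
	 */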
4129 mpt_lprt(mpt, MPT_PRT_DEBUG,
4130 "add ELS index %d ioindex %d for %p:%u\n",
4131 req->index, ioindex, req, req->serno);
4132 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4133 ("mpt_fc_post_els: request not locked"));
4134 mpt_send_cmd(mpt, req);
4138 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4140 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4141 PTR_CMD_BUFFER_DESCRIPTOR cb;
4144 paddr = req->req_pbuf;
4145 paddr += MPT_RQSL(mpt);
4146 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4147 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4150 fc->BufferCount = 1;
4151 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4152 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4154 cb = &fc->Buffer[0];
4155 cb->IoIndex = htole16(ioindex);
4156 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4158 mpt_check_doorbell(mpt);
4159 mpt_send_cmd(mpt, req);
4163 mpt_add_els_buffers(struct mpt_softc *mpt)
4167 if (mpt->is_fc == 0) {
4171 if (mpt->els_cmds_allocated) {
4175 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4176 M_DEVBUF, M_NOWAIT | M_ZERO);
4178 if (mpt->els_cmd_ptrs == NULL) {
4183 * Feed the chip some ELS buffer resources
4185 for (i = 0; i < MPT_MAX_ELS; i++) {
4186 request_t *req = mpt_get_request(mpt, FALSE);
4190 req->state |= REQ_STATE_LOCKED;
4191 mpt->els_cmd_ptrs[i] = req;
4192 mpt_fc_post_els(mpt, req, i);
4196 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4197 free(mpt->els_cmd_ptrs, M_DEVBUF);
4198 mpt->els_cmd_ptrs = NULL;
4201 if (i != MPT_MAX_ELS) {
4202 mpt_lprt(mpt, MPT_PRT_INFO,
4203 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4205 mpt->els_cmds_allocated = i;
4210 mpt_add_target_commands(struct mpt_softc *mpt)
4214 if (mpt->tgt_cmd_ptrs) {
4218 max = MPT_MAX_REQUESTS(mpt) >> 1;
4219 if (max > mpt->mpt_max_tgtcmds) {
4220 max = mpt->mpt_max_tgtcmds;
4223 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4224 if (mpt->tgt_cmd_ptrs == NULL) {
4226 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4230 for (i = 0; i < max; i++) {
4233 req = mpt_get_request(mpt, FALSE);
4237 req->state |= REQ_STATE_LOCKED;
4238 mpt->tgt_cmd_ptrs[i] = req;
4239 mpt_post_target_command(mpt, req, i);
4244 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4245 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4246 mpt->tgt_cmd_ptrs = NULL;
4250 mpt->tgt_cmds_allocated = i;
4253 mpt_lprt(mpt, MPT_PRT_INFO,
4254 "added %d of %d target bufs\n", i, max);
4260 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4263 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4265 } else if (lun >= MPT_MAX_LUNS) {
4267 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4270 if (mpt->tenabled == 0) {
4272 (void) mpt_fc_reset_link(mpt, 0);
4276 if (lun == CAM_LUN_WILDCARD) {
4277 mpt->trt_wildcard.enabled = 1;
4279 mpt->trt[lun].enabled = 1;
4285 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4289 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4291 } else if (lun >= MPT_MAX_LUNS) {
4293 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4296 if (lun == CAM_LUN_WILDCARD) {
4297 mpt->trt_wildcard.enabled = 0;
4299 mpt->trt[lun].enabled = 0;
4301 for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
4306 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4308 (void) mpt_fc_reset_link(mpt, 0);
4316 * Called with MPT lock held
4319 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4321 struct ccb_scsiio *csio = &ccb->csio;
4322 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4323 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4325 switch (tgt->state) {
4326 case TGT_STATE_IN_CAM:
4328 case TGT_STATE_MOVING_DATA:
4329 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4330 xpt_freeze_simq(mpt->sim, 1);
4331 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4332 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4336 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4337 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4338 mpt_tgt_dump_req_state(mpt, cmd_req);
4339 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4344 if (csio->dxfer_len) {
4345 bus_dmamap_callback_t *cb;
4346 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4350 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4351 ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4353 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4354 if (mpt->outofbeer == 0) {
4356 xpt_freeze_simq(mpt->sim, 1);
4357 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4359 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4360 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4364 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4365 if (sizeof (bus_addr_t) > 4) {
4366 cb = mpt_execute_req_a64;
4368 cb = mpt_execute_req;
4372 ccb->ccb_h.ccb_req_ptr = req;
4375 * Record the currently active ccb and the
4376 * request for it in our target state area.
4381 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4385 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4387 ta->QueueTag = ssp->InitiatorTag;
4388 } else if (mpt->is_spi) {
4389 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4391 ta->QueueTag = sp->Tag;
4393 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4394 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4395 ta->ReplyWord = htole32(tgt->reply_desc);
4396 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4398 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4399 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4401 ta->LUN[1] = csio->ccb_h.target_lun;
4404 ta->RelativeOffset = tgt->bytes_xfered;
4405 ta->DataLength = ccb->csio.dxfer_len;
4406 if (ta->DataLength > tgt->resid) {
4407 ta->DataLength = tgt->resid;
4411 * XXX Should be done after data transfer completes?
4413 tgt->resid -= csio->dxfer_len;
4414 tgt->bytes_xfered += csio->dxfer_len;
4416 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4417 ta->TargetAssistFlags |=
4418 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4421 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4422 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4423 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4424 ta->TargetAssistFlags |=
4425 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4428 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4430 mpt_lprt(mpt, MPT_PRT_DEBUG,
4431 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4432 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4433 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4435 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4437 if (error == EINPROGRESS) {
4438 xpt_freeze_simq(mpt->sim, 1);
4439 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4442 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4445 * XXX: I don't know why this seems to happen, but
4446 * XXX: completing the CCB seems to make things happy.
4447 * XXX: This seems to happen if the initiator requests
4448 * XXX: enough data that we have to do multiple CTIOs.
4450 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4451 mpt_lprt(mpt, MPT_PRT_DEBUG,
4452 "Meaningless STATUS CCB (%p): flags %x status %x "
4453 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4454 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4455 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4456 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4460 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4462 memcpy(sp, &csio->sense_data,
4463 min(csio->sense_len, MPT_SENSE_SIZE));
4465 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4470 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4471 uint32_t lun, int send, uint8_t *data, size_t length)
4473 mpt_tgt_state_t *tgt;
4474 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4482 * We enter with resid set to the data load for the command.
4484 tgt = MPT_TGT_STATE(mpt, cmd_req);
4485 if (length == 0 || tgt->resid == 0) {
4487 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4491 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4492 mpt_prt(mpt, "out of resources- dropping local response\n");
4498 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4502 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4503 ta->QueueTag = ssp->InitiatorTag;
4504 } else if (mpt->is_spi) {
4505 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4506 ta->QueueTag = sp->Tag;
4508 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4509 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4510 ta->ReplyWord = htole32(tgt->reply_desc);
4511 if (lun > MPT_MAX_LUNS) {
4512 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4513 ta->LUN[1] = lun & 0xff;
4517 ta->RelativeOffset = 0;
4518 ta->DataLength = length;
	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;
	int found = 0;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}
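	/*
	 * Only CCBs still sitting on our ATIO or INOT queues can be
	 * aborted here; a command already in flight has to go through
	 * the firmware abort path (mpt_abort_target_cmd below).
	 */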
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;
	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
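	/*
	 * FC and SAS IOCs take the abort as a normally posted request;
	 * the parallel SCSI parts want it pushed through the doorbell
	 * handshake interface instead.
	 */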
	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}
/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
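		/*
		 * Word layout as used below: rsp[2] carries the FCP_RSP
		 * flags and SCSI status bytes, rsp[3] the residual count,
		 * rsp[4] the sense length; the sense bytes themselves
		 * start at rsp[8].
		 */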
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
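		/*
		 * No auto status: hand the chip a single simple SGE
		 * pointing at the response payload staged past the
		 * message frame (paddr above).
		 */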
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
			MPI_SGE_FLAGS_HOST_TO_IOC |
			MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			MPI_SGE_FLAGS_LAST_ELEMENT |
			MPI_SGE_FLAGS_END_OF_LIST |
			MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immediate_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->arg = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->arg = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->arg = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->arg = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->arg = MSG_ABORT_TAG;
		break;
	default:
		inot->arg = MSG_NOOP;
		break;
	}

	/*
	 * XXX KDM we need the sequence/tag number for the target of the
	 * task management operation, especially if it is an abort.
	 */
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV | CAM_DEV_QFRZN;
	xpt_done((union ccb *)inot);
}
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
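	/*
	 * Canned standard inquiry data for LUNs without an upstream
	 * listener: byte 0 is 0x7f (peripheral qualifier 011b, device
	 * type 1Fh), i.e. no device supported at this LUN.
	 */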
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	uint32_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	uint16_t itag;
	uint16_t ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;
	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;

		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queueing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;

		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;

		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}
	/*
	 * Generate a simple lun
	 */
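	/*
	 * Only single-level LUN addressing is handled: 0x40 in the first
	 * byte selects the flat space method, 0x00 the peripheral device
	 * method; anything else is rejected.
	 */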
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;
			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;		/* current, fixed format */
			buf[2] = 0x5;		/* ILLEGAL REQUEST */
			buf[7] = 0x8;		/* additional length */
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
			{
				if (cdbp[1] != 0) {
					buf[12] = 0x26;
					buf[13] = 0x01;
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			}
			case REQUEST_SENSE:
			{
				buf[2] = 0x0;
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			}
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x26;
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}
	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}
	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;

		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1)) ? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	xpt_done((union ccb *)atiop);
}
static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
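		/*
		 * A null reply frame is a "turbo" context reply: the
		 * descriptor alone identifies the request, so the target
		 * state machine tells us what just completed.
		 */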
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
				tgt->ccb = NULL;
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);
	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
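	/*
	 * TARGET_ASSIST and STATUS_SEND normally complete through the
	 * turbo path above; a full reply frame here generally indicates
	 * an error completion, so just tear the request down.
	 */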
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}