/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif
#endif
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
    .name = "mpt_cam",
    .probe = mpt_cam_probe,
    .attach = mpt_cam_attach,
    .enable = mpt_cam_enable,
    .ready = mpt_cam_ready,
    .event = mpt_cam_event,
    .reset = mpt_cam_ioc_reset,
    .detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
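/*
 * Note: hw.mpt.enable_sata_wc is read as a loader tunable (e.g. set
 * "hw.mpt.enable_sata_wc=1" in /boot/loader.conf). A value of -1 (the
 * default) leaves each drive's write cache setting alone; 0 disables
 * and a positive value enables the write cache on attached SATA
 * devices during initial SAS configuration (see
 * mpt_set_initial_config_sas() below).
 */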
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
    int role;

    /*
     * Only attach to nodes that support the initiator or target role
     * (or want to) or have RAID physical devices that need CAM pass-thru
     * support.
     */
    if (mpt->do_cfg_role) {
        role = mpt->cfg_role;
    } else {
        role = mpt->role;
    }
    if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
        (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
        return (0);
    }
    return (ENODEV);
}
static int
mpt_cam_attach(struct mpt_softc *mpt)
{
    struct cam_devq *devq;
    mpt_handler_t handler;
    int maxq;
    int error;

    MPT_LOCK(mpt);
    TAILQ_INIT(&mpt->request_timeout_list);
    maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
        mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_io_handler_id);
    if (error != 0) {
        goto cleanup;
    }

    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_tmf_handler_id);
    if (error != 0) {
        goto cleanup;
    }

    /*
     * If we're fibre channel and could support target mode, we register
     * an ELS reply handler and give it resources.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_fc_els_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &fc_els_handler_id);
        if (error != 0) {
            goto cleanup;
        }
        if (mpt_add_els_buffers(mpt) == FALSE) {
            error = ENOMEM;
            goto cleanup;
        }
        maxq -= mpt->els_cmds_allocated;
    }

    /*
     * If we support target mode, we register a reply handler for it,
     * but don't add command resources until we actually enable target
     * mode.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_scsi_tgt_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &mpt->scsi_tgt_handler_id);
        if (error != 0) {
            goto cleanup;
        }
    }

    if (mpt->is_sas) {
        handler.reply_handler = mpt_sata_pass_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &sata_pass_handler_id);
        if (error != 0) {
            goto cleanup;
        }
    }

    /*
     * We keep one request reserved for timeout TMF requests.
     */
    mpt->tmf_req = mpt_get_request(mpt, FALSE);
    if (mpt->tmf_req == NULL) {
        mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Mark the request as free even though not on the free list.
     * There is only one TMF request allowed to be outstanding at
     * a time and the TMF routines perform their own allocation
     * tracking using the standard state flags.
     */
    mpt->tmf_req->state = REQ_STATE_FREE;
    maxq--;

    /*
     * The rest of this is CAM foo, for which we need to drop our lock
     */
    MPT_UNLOCK(mpt);

    if (mpt_spawn_recovery_thread(mpt) != 0) {
        mpt_prt(mpt, "Unable to spawn recovery thread!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Create the device queue for our SIM(s).
     */
    devq = cam_simq_alloc(maxq);
    if (devq == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Construct our SIM entry.
     */
    mpt->sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->sim == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
        cam_simq_free(devq);
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register exactly this bus.
     */
    if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
        mpt_prt(mpt, "Bus registration Failed!\n");
        error = ENOMEM;
        goto cleanup;
    }

    if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Path!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Only register a second bus for RAID physical
     * devices if the controller supports RAID.
     */
    if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
        return (0);
    }

    /*
     * Create a "bus" to export all hidden disks to CAM.
     */
    mpt->phydisk_sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->phydisk_sim == NULL) {
        mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register this bus.
     */
    if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
        CAM_SUCCESS) {
        mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
        error = ENOMEM;
        goto cleanup;
    }

    if (xpt_create_path(&mpt->phydisk_path, NULL,
        cam_sim_path(mpt->phydisk_sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
        error = ENOMEM;
        goto cleanup;
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
    return (0);

cleanup:
    mpt_cam_detach(mpt);
    return (error);
}
/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
    char *topology = NULL;
    int rv;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
        0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_fcport_page0.Header.PageVersion,
        mpt->mpt_fcport_page0.Header.PageLength,
        mpt->mpt_fcport_page0.Header.PageNumber,
        mpt->mpt_fcport_page0.Header.PageType);

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
        sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read FC Port Page 0\n");
        return (-1);
    }
    mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

    mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

    switch (mpt->mpt_fcport_page0.Flags &
        MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
        mpt->mpt_fcport_speed = 0;
        topology = "<NO LOOP>";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
        topology = "N-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
        topology = "NL-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
        topology = "F-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
        topology = "FL-Port";
        break;
    default:
        mpt->mpt_fcport_speed = 0;
        topology = "?";
        break;
    }

    mpt_lprt(mpt, MPT_PRT_INFO,
        "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
        "Speed %u-Gbit\n", topology,
        mpt->mpt_fcport_page0.WWNN.High,
        mpt->mpt_fcport_page0.WWNN.Low,
        mpt->mpt_fcport_page0.WWPN.High,
        mpt->mpt_fcport_page0.WWPN.Low,
        mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
    {
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

        snprintf(mpt->scinfo.fc.wwnn,
            sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWNN.High,
            mpt->mpt_fcport_page0.WWNN.Low);

        snprintf(mpt->scinfo.fc.wwpn,
            sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWPN.High,
            mpt->mpt_fcport_page0.WWPN.Low);

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
            "World Wide Node Name");

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
            "World Wide Port Name");
    }
#endif
    return (0);
}
/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
    CONFIG_PAGE_FC_PORT_1 fc;
    U32 fl;
    int r, doit = 0;
    int role;

    r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
        &fc.Header, FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1 header\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
        &fc.Header, sizeof (fc), FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1\n");
        return (mpt_fc_reset_link(mpt, 1));
    }
    mpt2host_config_page_fc_port_1(&fc);

    /*
     * Check our flags to make sure we support the role we want.
     */
    doit = 0;
    role = 0;
    fl = fc.Flags;

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
        role |= MPT_ROLE_INITIATOR;
    }
    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        role |= MPT_ROLE_TARGET;
    }

    fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

    if (mpt->do_cfg_role == 0) {
        role = mpt->cfg_role;
    } else {
        mpt->do_cfg_role = 0;
    }

    if (role != mpt->cfg_role) {
        if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
            if ((role & MPT_ROLE_INITIATOR) == 0) {
                mpt_prt(mpt, "adding initiator role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
                doit++;
            } else {
                mpt_prt(mpt, "keeping initiator role\n");
            }
        } else if (role & MPT_ROLE_INITIATOR) {
            mpt_prt(mpt, "removing initiator role\n");
            doit++;
        }
        if (mpt->cfg_role & MPT_ROLE_TARGET) {
            if ((role & MPT_ROLE_TARGET) == 0) {
                mpt_prt(mpt, "adding target role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
                doit++;
            } else {
                mpt_prt(mpt, "keeping target role\n");
            }
        } else if (role & MPT_ROLE_TARGET) {
            mpt_prt(mpt, "removing target role\n");
            doit++;
        }
        mpt->role = mpt->cfg_role;
    }

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
            mpt_prt(mpt, "adding OXID option\n");
            fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
            doit++;
        }
    }

    if (doit) {
        fc.Flags = fl;
        host2mpt_config_page_fc_port_1(&fc);
        r = mpt_write_cfg_page(mpt,
            MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
            sizeof(fc), FALSE, 5000);
        if (r != 0) {
            mpt_prt(mpt, "failed to update NVRAM with changes\n");
            return (0);
        }
        mpt_prt(mpt, "NOTE: NVRAM changes will not take "
            "effect until next reboot or IOC reset\n");
    }
    return (0);
}
static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
    ConfigExtendedPageHeader_t hdr;
    struct mptsas_phyinfo *phyinfo;
    SasIOUnitPage0_t *buffer;
    int error, len, i;

    error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
        0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    len = hdr.ExtPageLength * 4;
    buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        0, &hdr, buffer, len, 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    portinfo->num_phys = buffer->NumPhys;
    portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
        portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo->phy_info == NULL) {
        free(buffer, M_DEVBUF);
        error = ENOMEM;
        goto out;
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        phyinfo->phy_num = i;
        phyinfo->port_id = buffer->PhyData[i].Port;
        phyinfo->negotiated_link_rate =
            buffer->PhyData[i].NegotiatedLinkRate;
        phyinfo->handle =
            le16toh(buffer->PhyData[i].ControllerDevHandle);
    }

    free(buffer, M_DEVBUF);
out:
    return (error);
}
static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasPhyPage0_t *buffer;
    int error;

    error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
        0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasPhyPage0_t), 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    phy_info->hw_link_rate = buffer->HwLinkRate;
    phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
    phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
    phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

    free(buffer, M_DEVBUF);
out:
    return (error);
}
static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasDevicePage0_t *buffer;
    uint64_t sas_address;
    int error;

    bzero(device_info, sizeof(*device_info));
    error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasDevicePage0_t), 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    device_info->dev_handle = le16toh(buffer->DevHandle);
    device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
    device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
    device_info->slot = le16toh(buffer->Slot);
    device_info->phy_num = buffer->PhyNum;
    device_info->physical_port = buffer->PhysicalPort;
    device_info->target_id = buffer->TargetID;
    device_info->bus = buffer->Bus;
    bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
    device_info->sas_address = le64toh(sas_address);
    device_info->device_info = le32toh(buffer->DeviceInfo);

    free(buffer, M_DEVBUF);
out:
    return (error);
}
/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
    struct mptsas_portinfo *portinfo;
    struct mptsas_phyinfo *phyinfo;
    int error, i;

    portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo == NULL)
        return (ENOMEM);

    error = mptsas_sas_io_unit_pg0(mpt, portinfo);
    if (error) {
        free(portinfo, M_DEVBUF);
        return (0);
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        error = mptsas_sas_phy_pg0(mpt, phyinfo,
            (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
            MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
        if (error)
            break;
        error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
            (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
            phyinfo->identify.dev_handle);
        if (error)
            break;
        phyinfo->identify.phy_num = phyinfo->phy_num = i;
        if (phyinfo->attached.dev_handle)
            error = mptsas_sas_device_pg0(mpt,
                &phyinfo->attached,
                (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
                phyinfo->attached.dev_handle);
        if (error)
            break;
    }
    mpt->sas_portinfo = portinfo;
    return (0);
}
static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
    SataPassthroughRequest_t *pass;
    request_t *req;
    int error, status;

    req = mpt_get_request(mpt, 0);
    if (req == NULL)
        return;

    pass = req->req_vbuf;
    bzero(pass, sizeof(SataPassthroughRequest_t));
    pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
    pass->TargetID = devinfo->target_id;
    pass->Bus = devinfo->bus;
    pass->PassthroughFlags = 0;
    pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
    pass->DataLength = 0;
    pass->MsgContext = htole32(req->index | sata_pass_handler_id);
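    /*
     * The CommandFIS below is a host-to-device register FIS: byte 0
     * is the FIS type (0x27), byte 1 has the C bit set (0x80) to mark
     * a command-register update, byte 2 is the ATA SET FEATURES
     * command (0xef), and byte 3 is the features subcommand -- 0x02
     * to enable or 0x82 to disable the drive's write cache.
     */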
    pass->CommandFIS[0] = 0x27;
    pass->CommandFIS[1] = 0x80;
    pass->CommandFIS[2] = 0xef;
    pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
    pass->CommandFIS[7] = 0x40;
    pass->CommandFIS[15] = 0x08;

    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, req);
    error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
        10 * 1000);
    if (error) {
        mpt_free_request(mpt, req);
        printf("error %d sending passthrough\n", error);
        return;
    }

    status = le16toh(req->IOCStatus);
    if (status != MPI_IOCSTATUS_SUCCESS) {
        mpt_free_request(mpt, req);
        printf("IOCSTATUS %d\n", status);
        return;
    }

    mpt_free_request(mpt, req);
}
/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
    struct mptsas_phyinfo *phyinfo;
    int i;

    if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
        for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
            phyinfo = &mpt->sas_portinfo->phy_info[i];
            if (phyinfo->attached.dev_handle == 0)
                continue;
            if ((phyinfo->attached.device_info &
                MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
                continue;
            device_printf(mpt->dev,
                "%sabling SATA WC on phy %d\n",
                (mpt_enable_sata_wc) ? "En" : "Dis", i);
            mptsas_set_sata_wc(mpt, &phyinfo->attached,
                mpt_enable_sata_wc);
        }
    }

    return (0);
}
static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    if (reply_frame != NULL) {
        req->IOCStatus = le16toh(reply_frame->IOCStatus);
    }
    req->state &= ~REQ_STATE_QUEUED;
    req->state |= REQ_STATE_DONE;
    TAILQ_REMOVE(&mpt->request_pending_list, req, links);
    if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
        wakeup(req);
    } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
        /*
         * Whew- we can free this request (late completion)
         */
        mpt_free_request(mpt, req);
    }

    return (TRUE);
}
/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
    int rv, i;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
        &mpt->mpt_port_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_port_page0.Header.PageVersion,
        mpt->mpt_port_page0.Header.PageLength,
        mpt->mpt_port_page0.Header.PageNumber,
        mpt->mpt_port_page0.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
        &mpt->mpt_port_page1.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
        mpt->mpt_port_page1.Header.PageVersion,
        mpt->mpt_port_page1.Header.PageLength,
        mpt->mpt_port_page1.Header.PageNumber,
        mpt->mpt_port_page1.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
        &mpt->mpt_port_page2.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
        mpt->mpt_port_page2.Header.PageVersion,
        mpt->mpt_port_page2.Header.PageLength,
        mpt->mpt_port_page2.Header.PageNumber,
        mpt->mpt_port_page2.Header.PageType);

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page0[i].Header.PageVersion,
            mpt->mpt_dev_page0[i].Header.PageLength,
            mpt->mpt_dev_page0[i].Header.PageNumber,
            mpt->mpt_dev_page0[i].Header.PageType);

        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page1[i].Header.PageVersion,
            mpt->mpt_dev_page1[i].Header.PageLength,
            mpt->mpt_dev_page1[i].Header.PageNumber,
            mpt->mpt_dev_page1[i].Header.PageType);
    }

    /*
     * At this point, we don't *have* to fail. As long as we have
     * valid config header information, we can (barely) lurch
     * along.
     */

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
        sizeof(mpt->mpt_port_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 0\n");
    } else {
        mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
            mpt->mpt_port_page0.Capabilities,
            mpt->mpt_port_page0.PhysicalInterface);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
        sizeof(mpt->mpt_port_page1), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 1\n");
    } else {
        mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
            mpt->mpt_port_page1.Configuration,
            mpt->mpt_port_page1.OnBusTimerValue);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
        sizeof(mpt->mpt_port_page2), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 2\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "Port Page 2: Flags %x Settings %x\n",
            mpt->mpt_port_page2.PortFlags,
            mpt->mpt_port_page2.PortSettings);
        mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
        for (i = 0; i < 16; i++) {
            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
                " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
                i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
                mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
                mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
        }
    }

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 0\n", i);
            continue;
        }
        mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 0: Negotiated Params %x Information %x\n",
            i, mpt->mpt_dev_page0[i].NegotiatedParameters,
            mpt->mpt_dev_page0[i].Information);

        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 1\n", i);
            continue;
        }
        mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 1: Requested Params %x Configuration %x\n",
            i, mpt->mpt_dev_page1[i].RequestedParameters,
            mpt->mpt_dev_page1[i].Configuration);
    }
    return (0);
}
/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
    int error, i, pp1val;

    mpt->mpt_disc_enable = 0xff;
    mpt->mpt_tag_enable = 0;
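    /*
     * SPI Port Page 1's Configuration field should carry our initiator
     * ID in the low byte and the matching single-bit ID mask in the
     * port-response-ID field; compute the expected value and, if the
     * stored value disagrees, rewrite and re-verify the page below.
     */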
    pp1val = ((1 << mpt->mpt_ini_id) <<
        MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
    if (mpt->mpt_port_page1.Configuration != pp1val) {
        CONFIG_PAGE_SCSI_PORT_1 tmp;

        mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
            "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
        tmp = mpt->mpt_port_page1;
        tmp.Configuration = pp1val;
        host2mpt_config_page_scsi_port_1(&tmp);
        error = mpt_write_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        error = mpt_read_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        mpt2host_config_page_scsi_port_1(&tmp);
        if (tmp.Configuration != pp1val) {
            mpt_prt(mpt,
                "failed to reset SPI Port Page 1 Config value\n");
            return (-1);
        }
        mpt->mpt_port_page1 = tmp;
    }

    /*
     * The purpose of this exercise is to get
     * all targets back to async/narrow.
     *
     * We skip this step if the BIOS has already negotiated
     * speeds with the targets.
     */
    i = mpt->mpt_port_page2.PortSettings &
        MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
    if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "honoring BIOS transfer negotiations\n");
    } else {
        for (i = 0; i < 16; i++) {
            mpt->mpt_dev_page1[i].RequestedParameters = 0;
            mpt->mpt_dev_page1[i].Configuration = 0;
            (void) mpt_update_spi_config(mpt, i);
        }
    }
    return (0);
}
static int
mpt_cam_enable(struct mpt_softc *mpt)
{
    int error;

    error = EIO;
    if (mpt->is_fc) {
        if (mpt_read_config_info_fc(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_fc(mpt)) {
            goto out;
        }
    } else if (mpt->is_sas) {
        if (mpt_read_config_info_sas(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_sas(mpt)) {
            goto out;
        }
    } else if (mpt->is_spi) {
        if (mpt_read_config_info_spi(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_spi(mpt)) {
            goto out;
        }
    }
    error = 0;

out:
    return (error);
}
static void
mpt_cam_ready(struct mpt_softc *mpt)
{
    /*
     * If we're in target mode, hang out resources now
     * so we don't cause the world to hang talking to us.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
        /*
         * Try to add some target command resources
         */
        if (mpt_add_target_commands(mpt) == FALSE) {
            mpt_prt(mpt, "failed to add target commands\n");
        }
    }
    mpt->ready = 1;
}
static void
mpt_cam_detach(struct mpt_softc *mpt)
{
    mpt_handler_t handler;

    mpt_terminate_recovery_thread(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_io_handler_id);
    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_tmf_handler_id);
    handler.reply_handler = mpt_fc_els_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        fc_els_handler_id);
    handler.reply_handler = mpt_scsi_tgt_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        mpt->scsi_tgt_handler_id);
    handler.reply_handler = mpt_sata_pass_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        sata_pass_handler_id);

    if (mpt->tmf_req != NULL) {
        mpt->tmf_req->state = REQ_STATE_ALLOCATED;
        mpt_free_request(mpt, mpt->tmf_req);
        mpt->tmf_req = NULL;
    }
    if (mpt->sas_portinfo != NULL) {
        free(mpt->sas_portinfo, M_DEVBUF);
        mpt->sas_portinfo = NULL;
    }

    if (mpt->sim != NULL) {
        xpt_free_path(mpt->path);
        xpt_bus_deregister(cam_sim_path(mpt->sim));
        cam_sim_free(mpt->sim, TRUE);
        mpt->sim = NULL;
    }

    if (mpt->phydisk_sim != NULL) {
        xpt_free_path(mpt->phydisk_path);
        xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
        cam_sim_free(mpt->phydisk_sim, TRUE);
        mpt->phydisk_sim = NULL;
    }
}
/*
 * This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    mpt_intr(mpt, 0);
}
/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
    union ccb *ccb;
    struct mpt_softc *mpt;
    request_t *req;

    ccb = (union ccb *)arg;
    mpt = ccb->ccb_h.ccb_mpt_ptr;

#if __FreeBSD_version < 500000
    MPT_LOCK(mpt);
#endif
    MPT_LOCK_ASSERT(mpt);
    req = ccb->ccb_h.ccb_req_ptr;
    mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
        req->serno, ccb, req->ccb);
    /* XXX: WHAT ARE WE TRYING TO DO HERE? */
    if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
        req->state |= REQ_STATE_TIMEDOUT;
        mpt_wakeup_recovery_thread(mpt);
    }
#if __FreeBSD_version < 500000
    MPT_UNLOCK(mpt);
#endif
}
/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE64 *se;
    SGE_CHAIN64 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            istgt = 0;
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE64 pointers and start doing CHAIN64 entries after
     * that.
     */
    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE64 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
        if (sizeof(bus_addr_t) > 4) {
            se->Address.High =
                htole32(((uint64_t)dm_segs->ds_addr) >> 32);
        }
        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element.
     */
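    /*
     * (The ChainOffset field is expressed in 32-bit words, hence the
     * right shift by 2 in the byte-offset computation below.)
     */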
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE64 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN64 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;
        if (sizeof (bus_addr_t) > 4) {
            ce->Address.High =
                htole32(((uint64_t)chain_list_addr) >> 32);
        }
        ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
            MPI_SGE_FLAGS_64_BIT_ADDRESSING;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN64);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
        }
        ce->Length = htole16(ce->Length);

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address.Low = htole32(dm_segs->ds_addr &
                0xffffffff);
            if (sizeof (bus_addr_t) > 4) {
                se->Address.High =
                    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
            }
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
            mpt_timeout, ccb);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf(" Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE32 *se;
    SGE_CHAIN32 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE32 pointers and start doing CHAIN32 entries after
     * that.
     */
    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE32 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address = htole32(dm_segs->ds_addr);

        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element.
     */
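    /*
     * (As in the 64-bit variant above, ChainOffset is expressed in
     * 32-bit words, hence the right shift by 2 below.)
     */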
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE32 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN32 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;

        ce->Address = htole32(chain_list_addr);
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN32);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
        }
        ce->Length = htole16(ce->Length);

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address = htole32(dm_segs->ds_addr);

            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
            mpt_timeout, ccb);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf(" Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
    request_t *req;
    struct mpt_softc *mpt;
    MSG_SCSI_IO_REQUEST *mpt_req;
    struct ccb_scsiio *csio = &ccb->csio;
    struct ccb_hdr *ccbh = &ccb->ccb_h;
    bus_dmamap_callback_t *cb;
    target_id_t tgt;
    int raid_passthru;

    /* Get the pointer for the physical adapter */
    mpt = ccb->ccb_h.ccb_mpt_ptr;
    raid_passthru = (sim == mpt->phydisk_sim);

    CAMLOCK_2_MPTLOCK(mpt);
    if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
        if (mpt->outofbeer == 0) {
            mpt->outofbeer = 1;
            xpt_freeze_simq(mpt->sim, 1);
            mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_done(ccb);
        return;
    }
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
    MPTLOCK_2_CAMLOCK(mpt);

    if (sizeof (bus_addr_t) > 4) {
        cb = mpt_execute_req_a64;
    } else {
        cb = mpt_execute_req;
    }

    /*
     * Link the ccb and the request structure so we can find
     * the other knowing either the request or the ccb
     */
    req->ccb = ccb;
    ccb->ccb_h.ccb_req_ptr = req;

    /* Now we build the command for the IOC */
    mpt_req = req->req_vbuf;
    memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

    mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
    if (raid_passthru) {
        mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
        CAMLOCK_2_MPTLOCK(mpt);
        if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
            MPTLOCK_2_CAMLOCK(mpt);
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
            xpt_done(ccb);
            return;
        }
        MPTLOCK_2_CAMLOCK(mpt);
        mpt_req->Bus = 0;	/* we never set bus here */
    } else {
        tgt = ccb->ccb_h.target_id;
        mpt_req->Bus = 0;	/* XXX */
    }

    mpt_req->SenseBufferLength =
        (csio->sense_len < MPT_SENSE_SIZE) ?
        csio->sense_len : MPT_SENSE_SIZE;

    /*
     * We use the message context to find the request structure when we
     * get the command completion interrupt from the IOC.
     */
    mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

    /* Which physical device to do the I/O on */
    mpt_req->TargetID = tgt;

    /* We assume a single level LUN type */
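    /*
     * (LUNs of 256 and above are encoded with SAM flat-space
     * addressing: 0x40 plus the high six bits of the LUN in byte 0,
     * the low byte of the LUN in byte 1.)
     */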
2156 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2157 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2158 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2160 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2163 /* Set the direction of the transfer */
2164 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2165 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2166 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2167 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2169 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2172 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2173 switch(ccb->csio.tag_action) {
2174 case MSG_HEAD_OF_Q_TAG:
2175 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2178 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2180 case MSG_ORDERED_Q_TAG:
2181 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2183 case MSG_SIMPLE_Q_TAG:
2185 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2189 if (mpt->is_fc || mpt->is_sas) {
2190 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2192 /* XXX No such thing for a target doing packetized. */
2193 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2198 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2199 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2202 mpt_req->Control = htole32(mpt_req->Control);
2204 /* Copy the scsi command block into place */
2205 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2206 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2208 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2211 mpt_req->CDBLength = csio->cdb_len;
2212 mpt_req->DataLength = htole32(csio->dxfer_len);
2213 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2216 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2218 if (mpt->verbose == MPT_PRT_DEBUG) {
2220 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2221 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2222 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2223 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2224 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2225 mpt_prtc(mpt, "(%s %u byte%s ",
2226 (df == MPI_SCSIIO_CONTROL_READ)?
2227 "read" : "write", csio->dxfer_len,
2228 (csio->dxfer_len == 1)? ")" : "s)");
2230 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2231 ccb->ccb_h.target_lun, req, req->serno);
2235 * If we have any data to send with this command map it into bus space.
2237 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2238 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2240 * We've been given a pointer to a single buffer.
2242 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2244 * Virtual address that needs to translated into
2245 * one or more physical address ranges.
2248 int s = splsoftvm();
2249 error = bus_dmamap_load(mpt->buffer_dmat,
2250 req->dmap, csio->data_ptr, csio->dxfer_len,
2253 if (error == EINPROGRESS) {
2255 * So as to maintain ordering,
2256 * freeze the controller queue
2257 * until our mapping is returned.
2260 xpt_freeze_simq(mpt->sim, 1);
2261 ccbh->status |= CAM_RELEASE_SIMQ;
2265 * We have been given a pointer to a single physical buffer.
2268 struct bus_dma_segment seg;
2270 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2271 seg.ds_len = csio->dxfer_len;
2272 (*cb)(req, &seg, 1, 0);
2276 * We have been given a list of addresses.
2277 * This case could be easily supported but they are not
2278 * currently generated by the CAM subsystem so there
2279 * is no point in wasting the time right now.
2281 struct bus_dma_segment *segs;
2282 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2283 (*cb)(req, NULL, 0, EFAULT);
2285 /* Just use the segments provided */
2286 segs = (struct bus_dma_segment *)csio->data_ptr;
2287 (*cb)(req, segs, csio->sglist_cnt, 0);
2291 (*cb)(req, NULL, 0, 0);
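/*
 * To summarize the mapping logic above: the busdma callback (*cb)
 * is reached one of three ways- asynchronously via bus_dmamap_load()
 * for a virtual buffer, directly with a single synthesized segment
 * for a physical buffer or with a caller-provided physical S/G list,
 * or directly with zero segments when there is no data to move.
 * The callback is what actually builds the SGEs and sends the
 * command to the IOC.
 */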
2296 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2303 error = mpt_scsi_send_tmf(mpt,
2304 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2305 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2306 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2307 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2308 0, /* XXX How do I get the channel ID? */
2309 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2310 lun != CAM_LUN_WILDCARD ? lun : 0,
2315 * mpt_scsi_send_tmf hard resets on failure, so no
2316 * need to do so here.
2319 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2323 /* Wait for bus reset to be processed by the IOC. */
2324 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2325 REQ_STATE_DONE, sleep_ok, 5000);
2327 status = le16toh(mpt->tmf_req->IOCStatus);
2328 response = mpt->tmf_req->ResponseCode;
2329 mpt->tmf_req->state = REQ_STATE_FREE;
2332 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2333 "Resetting controller.\n");
2334 mpt_reset(mpt, TRUE);
2338 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2339 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2340 "Resetting controller.\n", status);
2341 mpt_reset(mpt, TRUE);
2345 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2346 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2347 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2348 "Resetting controller.\n", response);
2349 mpt_reset(mpt, TRUE);
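/*
 * Note the escalation pattern in the checks above: a timed-out,
 * rejected, or failed task management function always falls back
 * to a hard controller reset, so mpt_bus_reset() leaves the
 * controller in a known state no matter what.
 */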
2356 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2360 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2362 req = mpt_get_request(mpt, FALSE);
2367 memset(fc, 0, sizeof(*fc));
2368 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2369 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2370 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2371 mpt_send_cmd(mpt, req);
2373 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2374 REQ_STATE_DONE, FALSE, 60 * 1000);
2376 mpt_free_request(mpt, req);
2383 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2384 MSG_EVENT_NOTIFY_REPLY *msg)
2386 uint32_t data0, data1;
2388 data0 = le32toh(msg->Data[0]);
2389 data1 = le32toh(msg->Data[1]);
2390 switch(msg->Event & 0xFF) {
2391 case MPI_EVENT_UNIT_ATTENTION:
2392 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2393 (data0 >> 8) & 0xff, data0 & 0xff);
2396 case MPI_EVENT_IOC_BUS_RESET:
2397 /* We generated a bus reset */
2398 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2399 (data0 >> 8) & 0xff);
2400 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2403 case MPI_EVENT_EXT_BUS_RESET:
2404 /* Someone else generated a bus reset */
2405 mpt_prt(mpt, "External Bus Reset Detected\n");
2407 * These replies don't return EventData like the MPI
2410 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2413 case MPI_EVENT_RESCAN:
2414 #if __FreeBSD_version >= 600000
2419 * In general this means a device has been added to the loop.
2421 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2422 if (mpt->ready == 0) {
2425 if (mpt->phydisk_sim) {
2426 pathid = cam_sim_path(mpt->phydisk_sim);
2428 pathid = cam_sim_path(mpt->sim);
2430 MPTLOCK_2_CAMLOCK(mpt);
2432 * Allocate a CCB, create a wildcard path for this bus,
2433 * and schedule a rescan.
2435 ccb = xpt_alloc_ccb_nowait();
2437 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2438 CAMLOCK_2_MPTLOCK(mpt);
2442 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2443 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2444 CAMLOCK_2_MPTLOCK(mpt);
2445 mpt_prt(mpt, "unable to create path for rescan\n");
2450 CAMLOCK_2_MPTLOCK(mpt);
2454 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2457 case MPI_EVENT_LINK_STATUS_CHANGE:
2458 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2459 (data1 >> 8) & 0xff,
2460 ((data0 & 0xff) == 0)? "Failed" : "Active");
2463 case MPI_EVENT_LOOP_STATE_CHANGE:
2464 switch ((data0 >> 16) & 0xff) {
2467 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2468 "(Loop Initialization)\n",
2469 (data1 >> 8) & 0xff,
2470 (data0 >> 8) & 0xff,
2472 switch ((data0 >> 8) & 0xff) {
2474 if ((data0 & 0xff) == 0xF7) {
2475 mpt_prt(mpt, "Device needs AL_PA\n");
2477 mpt_prt(mpt, "Device %02x doesn't like "
2483 if ((data0 & 0xff) == 0xF7) {
2484 mpt_prt(mpt, "Device had loop failure "
2485 "at its receiver prior to acquiring"
2488 mpt_prt(mpt, "Device %02x detected loop"
2489 " failure at its receiver\n",
2494 mpt_prt(mpt, "Device %02x requests that device "
2495 "%02x reset itself\n",
2497 (data0 >> 8) & 0xFF);
2502 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2503 "LPE(%02x,%02x) (Loop Port Enable)\n",
2504 (data1 >> 8) & 0xff, /* Port */
2505 (data0 >> 8) & 0xff, /* Character 3 */
2506 (data0 ) & 0xff /* Character 4 */);
2509 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2510 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2511 (data1 >> 8) & 0xff, /* Port */
2512 (data0 >> 8) & 0xff, /* Character 3 */
2513 (data0 ) & 0xff /* Character 4 */);
2516 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2517 "FC event (%02x %02x %02x)\n",
2518 (data1 >> 8) & 0xff, /* Port */
2519 (data0 >> 16) & 0xff, /* Event */
2520 (data0 >> 8) & 0xff, /* Character 3 */
2521 (data0 ) & 0xff /* Character 4 */);
2525 case MPI_EVENT_LOGOUT:
2526 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2527 (data1 >> 8) & 0xff, data0);
2529 case MPI_EVENT_QUEUE_FULL:
2531 struct cam_sim *sim;
2532 struct cam_path *tmppath;
2533 struct ccb_relsim crs;
2534 PTR_EVENT_DATA_QUEUE_FULL pqf;
2537 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2538 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2539 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2540 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2541 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2542 pqf->TargetID) != 0) {
2543 sim = mpt->phydisk_sim;
2547 MPTLOCK_2_CAMLOCK(mpt);
2548 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2549 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2550 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2551 mpt_prt(mpt, "unable to create a path to send "
2553 CAMLOCK_2_MPTLOCK(mpt);
2556 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2557 crs.ccb_h.func_code = XPT_REL_SIMQ;
2558 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2559 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2560 crs.openings = pqf->CurrentDepth - 1;
2561 xpt_action((union ccb *)&crs);
2562 if (crs.ccb_h.status != CAM_REQ_CMP) {
2563 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2565 xpt_free_path(tmppath);
2567 CAMLOCK_2_MPTLOCK(mpt);
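/*
 * What the loop above accomplishes, in brief: on a QUEUE FULL event
 * the IOC reports the depth at which the target choked, so for each
 * LUN on that target we issue an XPT_REL_SIMQ with
 * RELSIM_ADJUST_OPENINGS to clamp CAM's tagged openings to one less
 * than that depth, throttling further I/O to the device.
 */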
2570 case MPI_EVENT_IR_RESYNC_UPDATE:
2571 mpt_prt(mpt, "IR resync update %d completed\n",
2572 (data0 >> 16) & 0xff);
2574 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2577 struct cam_sim *sim;
2578 struct cam_path *tmppath;
2579 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2581 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2582 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2583 psdsc->TargetID) != 0)
2584 sim = mpt->phydisk_sim;
2587 switch(psdsc->ReasonCode) {
2588 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2589 MPTLOCK_2_CAMLOCK(mpt);
2590 ccb = xpt_alloc_ccb_nowait();
2593 "unable to alloc CCB for rescan\n");
2594 CAMLOCK_2_MPTLOCK(mpt);
2597 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2598 cam_sim_path(sim), psdsc->TargetID,
2599 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2600 CAMLOCK_2_MPTLOCK(mpt);
2602 "unable to create path for rescan\n");
2607 CAMLOCK_2_MPTLOCK(mpt);
2609 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2610 MPTLOCK_2_CAMLOCK(mpt);
2611 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2612 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2615 "unable to create path for async event");
2616 CAMLOCK_2_MPTLOCK(mpt);
2619 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2620 xpt_free_path(tmppath);
2621 CAMLOCK_2_MPTLOCK(mpt);
2623 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2624 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2625 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2628 mpt_lprt(mpt, MPT_PRT_WARN,
2629 "SAS device status change: Bus: 0x%02x TargetID: "
2630 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2631 psdsc->TargetID, psdsc->ReasonCode);
2636 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2638 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2640 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2641 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2642 mpt_lprt(mpt, MPT_PRT_WARN,
2643 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2644 pde->Port, pde->DiscoveryStatus);
2647 case MPI_EVENT_EVENT_CHANGE:
2648 case MPI_EVENT_INTEGRATED_RAID:
2650 case MPI_EVENT_LOG_ENTRY_ADDED:
2651 case MPI_EVENT_SAS_DISCOVERY:
2652 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2653 case MPI_EVENT_SAS_SES:
2656 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2664 * Reply path for all SCSI I/O requests, called from our
2665 * interrupt handler by extracting our handler index from
2666 * the MsgContext field of the reply from the IOC.
2668 * This routine is optimized for the common case of a
2669 * completion without error. All exception handling is
2670 * offloaded to non-inlined helper routines to minimize cache footprint.
2674 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2675 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2677 MSG_SCSI_IO_REQUEST *scsi_req;
2680 if (req->state == REQ_STATE_FREE) {
2681 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2685 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2688 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2693 mpt_req_untimeout(req, mpt_timeout, ccb);
2694 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2696 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2697 bus_dmasync_op_t op;
2699 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2700 op = BUS_DMASYNC_POSTREAD;
2702 op = BUS_DMASYNC_POSTWRITE;
2703 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2704 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2707 if (reply_frame == NULL) {
2709 * Context only reply, completion without error status.
2711 ccb->csio.resid = 0;
2712 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2713 ccb->csio.scsi_status = SCSI_STATUS_OK;
2715 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
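/*
 * Two reply flavors reach this point: a "context only" reply, where
 * the IOC hands back just the 32-bit message context and thereby
 * implies a clean completion, and a full reply frame, which is only
 * generated when there is exception status to convey and is parsed
 * by mpt_scsi_reply_frame_handler() below.
 */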
2718 if (mpt->outofbeer) {
2719 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2721 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2723 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2724 struct scsi_inquiry_data *iq =
2725 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2726 if (scsi_req->Function ==
2727 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2729 * Fake out the device type so that only the
2730 * pass-thru device will attach.
2732 iq->device &= ~0x1F;
2733 iq->device |= T_NODEVICE;
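/*
 * Rewriting the INQUIRY data this way replaces the peripheral
 * device type with T_NODEVICE, so peripheral drivers such as da(4)
 * ignore the raw disk behind a RAID volume and only the pass-thru
 * device attaches to it.
 */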
2736 if (mpt->verbose == MPT_PRT_DEBUG) {
2737 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2740 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2741 MPTLOCK_2_CAMLOCK(mpt);
2743 CAMLOCK_2_MPTLOCK(mpt);
2744 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2745 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2747 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2749 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2751 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2752 ("CCB req needed wakeup"));
2754 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2756 mpt_free_request(mpt, req);
2761 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2762 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2764 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2766 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2768 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2770 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2771 /* Record IOC Status and Response Code of TMF for any waiters. */
2772 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2773 req->ResponseCode = tmf_reply->ResponseCode;
2775 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2776 req, req->serno, le16toh(tmf_reply->IOCStatus));
2777 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2778 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2779 req->state |= REQ_STATE_DONE;
2782 mpt->tmf_req->state = REQ_STATE_FREE;
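/*
 * If nobody was sleeping on the TMF request, mark it free again
 * right here; otherwise the waiter we just flagged as DONE is
 * responsible for harvesting the IOCStatus/ResponseCode recorded
 * above and then marking the request free itself.
 */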
2788 * XXX: Move to definitions file
2806 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2807 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2810 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2811 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2814 * We are going to reuse the ELS request to send this response back.
2817 memset(rsp, 0, sizeof(*rsp));
2819 #ifdef USE_IMMEDIATE_LINK_DATA
2821 * Apparently the IMMEDIATE stuff doesn't work.
2823 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2825 rsp->RspLength = length;
2826 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2827 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2830 * Copy over information from the original reply frame to
2831 * its correct place in the response.
2833 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2836 * And now copy back the temporary area to the original frame.
2838 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2839 rsp = req->req_vbuf;
2841 #ifdef USE_IMMEDIATE_LINK_DATA
2842 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2845 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2846 bus_addr_t paddr = req->req_pbuf;
2847 paddr += MPT_RQSL(mpt);
2850 MPI_SGE_FLAGS_HOST_TO_IOC |
2851 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2852 MPI_SGE_FLAGS_LAST_ELEMENT |
2853 MPI_SGE_FLAGS_END_OF_LIST |
2854 MPI_SGE_FLAGS_END_OF_BUFFER;
2855 fl <<= MPI_SGE_FLAGS_SHIFT;
2857 se->FlagsLength = htole32(fl);
2858 se->Address = htole32((uint32_t) paddr);
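/*
 * The single SIMPLE32 element built above is the non-immediate
 * variant of the response: its flags mark the buffer host-to-IOC
 * and last in both the element list and the buffer, and the payload
 * itself sits in the second half of the request area, at
 * req_pbuf + MPT_RQSL(mpt).
 */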
2865 mpt_send_cmd(mpt, req);
2869 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2870 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2872 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2873 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2877 U16 status = le16toh(reply_frame->IOCStatus);
2880 int do_refresh = TRUE;
2883 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2884 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2885 req, req->serno, rp->Function));
2886 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2887 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2889 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2892 mpt_lprt(mpt, MPT_PRT_DEBUG,
2893 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2894 req, req->serno, reply_frame, reply_frame->Function);
2896 if (status != MPI_IOCSTATUS_SUCCESS) {
2897 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2898 status, reply_frame->Function);
2899 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2901 * XXX: to get around shutdown issue
2910 * If the function is a link service response, we recycle the
2911 * response to be a refresh for a new link service request.
2913 * The request pointer is bogus in this case and we have to fetch
2914 * it based upon the TransactionContext.
2916 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2917 /* Freddie Uncle Charlie Katie */
2918 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2919 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2920 if (mpt->els_cmd_ptrs[ioindex] == req) {
2924 KASSERT(ioindex < mpt->els_cmds_allocated,
2925 ("can't find my mommie!"));
2927 /* remove from active list as we're going to re-post it */
2928 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2929 req->state &= ~REQ_STATE_QUEUED;
2930 req->state |= REQ_STATE_DONE;
2931 mpt_fc_post_els(mpt, req, ioindex);
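/*
 * In other words, a completed LINK_SRVC_RSP is never handed back to
 * a caller- the same request/buffer pair is immediately re-posted
 * via mpt_fc_post_els() so the IOC keeps a full complement of ELS
 * buffers.
 */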
2935 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2936 /* remove from active list as we're done */
2937 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2938 req->state &= ~REQ_STATE_QUEUED;
2939 req->state |= REQ_STATE_DONE;
2940 if (req->state & REQ_STATE_TIMEDOUT) {
2941 mpt_lprt(mpt, MPT_PRT_DEBUG,
2942 "Sync Primitive Send Completed After Timeout\n");
2943 mpt_free_request(mpt, req);
2944 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2945 mpt_lprt(mpt, MPT_PRT_DEBUG,
2946 "Async Primitive Send Complete\n");
2947 mpt_free_request(mpt, req);
2949 mpt_lprt(mpt, MPT_PRT_DEBUG,
2950 "Sync Primitive Send Complete- Waking Waiter\n");
2956 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2957 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2958 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2959 rp->MsgLength, rp->MsgFlags);
2963 if (rp->MsgLength <= 5) {
2965 * This is just an ack of an original ELS buffer post
2967 mpt_lprt(mpt, MPT_PRT_DEBUG,
2968 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2973 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2974 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2976 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2977 cmd = be32toh(elsbuf[0]) >> 24;
2979 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2980 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2984 ioindex = le32toh(rp->TransactionContext);
2985 req = mpt->els_cmd_ptrs[ioindex];
2987 if (rctl == ELS && type == 1) {
2991 * Send back a PRLI ACC
2993 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2994 le32toh(rp->Wwn.PortNameHigh),
2995 le32toh(rp->Wwn.PortNameLow));
2996 elsbuf[0] = htobe32(0x02100014);
2997 elsbuf[1] |= htobe32(0x00000100);
2998 elsbuf[4] = htobe32(0x00000002);
2999 if (mpt->role & MPT_ROLE_TARGET)
3000 elsbuf[4] |= htobe32(0x00000010);
3001 if (mpt->role & MPT_ROLE_INITIATOR)
3002 elsbuf[4] |= htobe32(0x00000020);
3003 /* remove from active list as we're done */
3004 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3005 req->state &= ~REQ_STATE_QUEUED;
3006 req->state |= REQ_STATE_DONE;
3007 mpt_fc_els_send_response(mpt, req, rp, 20);
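/*
 * A decoding note for the constants above (treat this as a sketch
 * of the FCP PRLI service parameter page, not a spec quote): word 4
 * carries the role bits, where 0x10 advertises the target function
 * and 0x20 the initiator function, so the ACC we send reflects
 * exactly the roles this port is configured for.
 */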
3011 memset(elsbuf, 0, 5 * (sizeof (U32)));
3012 elsbuf[0] = htobe32(0x02100014);
3013 elsbuf[1] = htobe32(0x08000100);
3014 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
3015 le32toh(rp->Wwn.PortNameHigh),
3016 le32toh(rp->Wwn.PortNameLow));
3017 /* remove from active list as we're done */
3018 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3019 req->state &= ~REQ_STATE_QUEUED;
3020 req->state |= REQ_STATE_DONE;
3021 mpt_fc_els_send_response(mpt, req, rp, 20);
3025 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
3028 } else if (rctl == ABTS && type == 0) {
3029 uint16_t rx_id = le16toh(rp->Rxid);
3030 uint16_t ox_id = le16toh(rp->Oxid);
3031 request_t *tgt_req = NULL;
3034 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3035 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3036 le32toh(rp->Wwn.PortNameLow));
3037 if (rx_id >= mpt->mpt_max_tgtcmds) {
3038 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3039 } else if (mpt->tgt_cmd_ptrs == NULL) {
3040 mpt_prt(mpt, "No TGT CMD PTRS\n");
3042 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3045 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
3050 * Check to make sure we have the correct command.
3051 * The reply descriptor in the target state should
3052 * contain an IoIndex that matches the RX_ID.
3055 * It'd be nice to have the OX_ID to crosscheck with as well.
3058 ct_id = GET_IO_INDEX(tgt->reply_desc);
3060 if (ct_id != rx_id) {
3061 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3062 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3070 "CCB (%p): lun %u flags %x status %x\n",
3071 ccb, ccb->ccb_h.target_lun,
3072 ccb->ccb_h.flags, ccb->ccb_h.status);
3074 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3075 "%x nxfers %x\n", tgt->state,
3076 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3079 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3080 mpt_prt(mpt, "unable to start TargetAbort\n");
3083 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3085 memset(elsbuf, 0, 5 * (sizeof (U32)));
3086 elsbuf[0] = htobe32(0);
3087 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3088 elsbuf[2] = htobe32(0x000ffff);
3090 * Dork with the reply frame so that the response to it goes out as a BA_ACC.
3093 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3094 /* remove from active list as we're done */
3095 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3096 req->state &= ~REQ_STATE_QUEUED;
3097 req->state |= REQ_STATE_DONE;
3098 mpt_fc_els_send_response(mpt, req, rp, 12);
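/*
 * The 12-byte payload assembled above appears to be a minimal
 * BA_ACC: word 1 echoes the OX_ID and RX_ID of the aborted
 * exchange and word 2 carries the low/high SEQ_CNT fields; any
 * command the RX_ID referenced has already been aborted (or noted
 * missing) by this point.
 */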
3101 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3103 if (do_refresh == TRUE) {
3104 /* remove from active list as we're done */
3105 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3106 req->state &= ~REQ_STATE_QUEUED;
3107 req->state |= REQ_STATE_DONE;
3108 mpt_fc_post_els(mpt, req, ioindex);
3114 * Clean up all SCSI Initiator personality state in response
3115 * to a controller reset.
3118 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3122 * The pending list is already run down by
3123 * the generic handler. Perform the same
3124 * operation on the timed out request list.
3126 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3127 MPI_IOCSTATUS_INVALID_STATE);
3130 * XXX: Do we need to repost ELS and Target Command Buffers?
3134 * Inform the XPT that a bus reset has occurred.
3136 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3140 * Parse additional completion information in the reply
3141 * frame for SCSI I/O requests.
3144 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3145 MSG_DEFAULT_REPLY *reply_frame)
3148 MSG_SCSI_IO_REPLY *scsi_io_reply;
3152 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3153 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3154 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3155 ("MPT SCSI I/O Handler called with incorrect reply type"));
3156 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3157 ("MPT SCSI I/O Handler called with continuation reply"));
3159 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3160 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3161 ioc_status &= MPI_IOCSTATUS_MASK;
3162 sstate = scsi_io_reply->SCSIState;
3166 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3168 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3169 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3170 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3171 ccb->csio.sense_resid =
3172 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
3173 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3174 min(ccb->csio.sense_len,
3175 le32toh(scsi_io_reply->SenseCount)));
3178 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3180 * Tag messages were rejected, but the non-tagged retry succeeded.
3183 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3187 switch(ioc_status) {
3188 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3191 * The Linux driver indicates that a zero
3192 * transfer length with this error code
3193 * means a CRC error.
3195 * No need to swap the bytes when checking against zero.
3198 if (scsi_io_reply->TransferCount == 0) {
3199 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3203 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3204 case MPI_IOCSTATUS_SUCCESS:
3205 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3206 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3208 * Status was never returned for this transaction.
3210 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3211 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3212 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3213 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3214 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3215 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3216 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3218 /* XXX Handle SPI-Packet and FCP-2 response info. */
3219 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3221 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3223 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3224 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3226 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3227 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3229 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3231 * Since selection timeouts and "device really not
3232 * there" are grouped into this error code, report
3233 * selection timeout. Selection timeouts are
3234 * typically retried before giving up on the device
3235 * whereas "device not there" errors are considered unretryable.
3238 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3240 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3241 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3243 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3244 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3246 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3247 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3249 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3250 ccb->ccb_h.status = CAM_UA_TERMIO;
3252 case MPI_IOCSTATUS_INVALID_STATE:
3254 * The IOC has been reset. Emulate a bus reset.
3257 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3258 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3260 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3261 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3263 * Don't clobber any timeout status that has
3264 * already been set for this transaction. We
3265 * want the SCSI layer to be able to differentiate
3266 * between the command we aborted due to timeout
3267 * and any innocent bystanders.
3269 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3271 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3274 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3275 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3277 case MPI_IOCSTATUS_BUSY:
3278 mpt_set_ccb_status(ccb, CAM_BUSY);
3280 case MPI_IOCSTATUS_INVALID_FUNCTION:
3281 case MPI_IOCSTATUS_INVALID_SGL:
3282 case MPI_IOCSTATUS_INTERNAL_ERROR:
3283 case MPI_IOCSTATUS_INVALID_FIELD:
3286 * Some of the above may need to kick
3287 * off a recovery action!!!!
3289 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3293 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3294 mpt_freeze_ccb(ccb);
3301 mpt_action(struct cam_sim *sim, union ccb *ccb)
3303 struct mpt_softc *mpt;
3304 struct ccb_trans_settings *cts;
3309 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3311 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3312 raid_passthru = (sim == mpt->phydisk_sim);
3313 MPT_LOCK_ASSERT(mpt);
3315 tgt = ccb->ccb_h.target_id;
3316 lun = ccb->ccb_h.target_lun;
3317 if (raid_passthru &&
3318 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3319 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3320 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3321 CAMLOCK_2_MPTLOCK(mpt);
3322 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3323 MPTLOCK_2_CAMLOCK(mpt);
3324 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3325 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3329 MPTLOCK_2_CAMLOCK(mpt);
3331 ccb->ccb_h.ccb_mpt_ptr = mpt;
3333 switch (ccb->ccb_h.func_code) {
3334 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3336 * Do a couple of preliminary checks...
3338 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3339 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3340 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3341 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3345 /* Max supported CDB length is 16 bytes */
3346 /* XXX Unless we implement the new 32-byte message type */
3347 if (ccb->csio.cdb_len >
3348 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3349 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3350 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3353 #ifdef MPT_TEST_MULTIPATH
3354 if (mpt->failure_id == ccb->ccb_h.target_id) {
3355 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3356 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3360 ccb->csio.scsi_status = SCSI_STATUS_OK;
3361 mpt_start(sim, ccb);
3365 if (raid_passthru) {
3366 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3367 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3371 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3373 xpt_print(ccb->ccb_h.path, "reset bus\n");
3376 xpt_print(ccb->ccb_h.path, "reset device\n");
3378 CAMLOCK_2_MPTLOCK(mpt);
3379 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3380 MPTLOCK_2_CAMLOCK(mpt);
3383 * mpt_bus_reset is always successful in that it
3384 * will fall back to a hard reset should a bus
3385 * reset attempt fail.
3387 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3388 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3393 union ccb *accb = ccb->cab.abort_ccb;
3394 CAMLOCK_2_MPTLOCK(mpt);
3395 switch (accb->ccb_h.func_code) {
3396 case XPT_ACCEPT_TARGET_IO:
3397 case XPT_IMMED_NOTIFY:
3398 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3400 case XPT_CONT_TARGET_IO:
3401 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3402 ccb->ccb_h.status = CAM_UA_ABORT;
3405 ccb->ccb_h.status = CAM_UA_ABORT;
3408 ccb->ccb_h.status = CAM_REQ_INVALID;
3411 MPTLOCK_2_CAMLOCK(mpt);
3415 #ifdef CAM_NEW_TRAN_CODE
3416 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3418 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3420 #define DP_DISC_ENABLE 0x1
3421 #define DP_DISC_DISABL 0x2
3422 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3424 #define DP_TQING_ENABLE 0x4
3425 #define DP_TQING_DISABL 0x8
3426 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3428 #define DP_WIDE 0x10
3429 #define DP_NARROW 0x20
3430 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3432 #define DP_SYNC 0x40
3434 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3436 #ifdef CAM_NEW_TRAN_CODE
3437 struct ccb_trans_settings_scsi *scsi;
3438 struct ccb_trans_settings_spi *spi;
3447 if (mpt->is_fc || mpt->is_sas) {
3448 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3452 #ifdef CAM_NEW_TRAN_CODE
3453 scsi = &cts->proto_specific.scsi;
3454 spi = &cts->xport_specific.spi;
3457 * We can be called just to validate transport and proto versions
3459 if (scsi->valid == 0 && spi->valid == 0) {
3460 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3466 * Skip attempting settings on RAID volume disks.
3467 * Other devices on the bus get the normal treatment.
3469 if (mpt->phydisk_sim && raid_passthru == 0 &&
3470 mpt_is_raid_volume(mpt, tgt) != 0) {
3471 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3472 "no transfer settings for RAID vols\n");
3473 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3477 i = mpt->mpt_port_page2.PortSettings &
3478 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3479 j = mpt->mpt_port_page2.PortFlags &
3480 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3481 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3482 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3483 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3484 "honoring BIOS transfer negotiations\n");
3485 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3493 #ifndef CAM_NEW_TRAN_CODE
3494 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3495 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3496 DP_DISC_ENABLE : DP_DISC_DISABL;
3499 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3500 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3501 DP_TQING_ENABLE : DP_TQING_DISABL;
3504 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3505 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3508 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3509 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3511 period = cts->sync_period;
3512 offset = cts->sync_offset;
3515 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3516 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3517 DP_DISC_ENABLE : DP_DISC_DISABL;
3520 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3521 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3522 DP_TQING_ENABLE : DP_TQING_DISABL;
3525 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3526 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3527 DP_WIDE : DP_NARROW;
3530 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3532 offset = spi->sync_offset;
3534 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3535 &mpt->mpt_dev_page1[tgt];
3536 offset = ptr->RequestedParameters;
3537 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3538 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3540 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3542 period = spi->sync_period;
3544 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3545 &mpt->mpt_dev_page1[tgt];
3546 period = ptr->RequestedParameters;
3547 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3548 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3551 CAMLOCK_2_MPTLOCK(mpt);
3552 if (dval & DP_DISC_ENABLE) {
3553 mpt->mpt_disc_enable |= (1 << tgt);
3554 } else if (dval & DP_DISC_DISABL) {
3555 mpt->mpt_disc_enable &= ~(1 << tgt);
3557 if (dval & DP_TQING_ENABLE) {
3558 mpt->mpt_tag_enable |= (1 << tgt);
3559 } else if (dval & DP_TQING_DISABL) {
3560 mpt->mpt_tag_enable &= ~(1 << tgt);
3562 if (dval & DP_WIDTH) {
3563 mpt_setwidth(mpt, tgt, 1);
3565 if (dval & DP_SYNC) {
3566 mpt_setsync(mpt, tgt, period, offset);
3569 MPTLOCK_2_CAMLOCK(mpt);
3570 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3573 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3574 "set [%d]: 0x%x period 0x%x offset %d\n",
3575 tgt, dval, period, offset);
3576 if (mpt_update_spi_config(mpt, tgt)) {
3577 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3579 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3581 MPTLOCK_2_CAMLOCK(mpt);
3584 case XPT_GET_TRAN_SETTINGS:
3586 #ifdef CAM_NEW_TRAN_CODE
3587 struct ccb_trans_settings_scsi *scsi;
3589 cts->protocol = PROTO_SCSI;
3591 struct ccb_trans_settings_fc *fc =
3592 &cts->xport_specific.fc;
3593 cts->protocol_version = SCSI_REV_SPC;
3594 cts->transport = XPORT_FC;
3595 cts->transport_version = 0;
3596 fc->valid = CTS_FC_VALID_SPEED;
3597 fc->bitrate = 100000;
3598 } else if (mpt->is_sas) {
3599 struct ccb_trans_settings_sas *sas =
3600 &cts->xport_specific.sas;
3601 cts->protocol_version = SCSI_REV_SPC2;
3602 cts->transport = XPORT_SAS;
3603 cts->transport_version = 0;
3604 sas->valid = CTS_SAS_VALID_SPEED;
3605 sas->bitrate = 300000;
3607 cts->protocol_version = SCSI_REV_2;
3608 cts->transport = XPORT_SPI;
3609 cts->transport_version = 2;
3610 if (mpt_get_spi_settings(mpt, cts) != 0) {
3611 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3615 scsi = &cts->proto_specific.scsi;
3616 scsi->valid = CTS_SCSI_VALID_TQ;
3617 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3621 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3622 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3623 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3624 } else if (mpt->is_sas) {
3625 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3626 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3627 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3628 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3629 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3633 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3636 case XPT_CALC_GEOMETRY:
3638 struct ccb_calc_geometry *ccg;
3641 if (ccg->block_size == 0) {
3642 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3643 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3646 mpt_calc_geometry(ccg, /*extended*/1);
3647 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3650 case XPT_PATH_INQ: /* Path routing inquiry */
3652 struct ccb_pathinq *cpi = &ccb->cpi;
3654 cpi->version_num = 1;
3655 cpi->target_sprt = 0;
3656 cpi->hba_eng_cnt = 0;
3657 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3658 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3660 * FC cards report MAX_DEVICES of 512, but
3661 * the MSG_SCSI_IO_REQUEST target id field
3662 * is only 8 bits. Until we fix the driver
3663 * to support 'channels' for bus overflow, clamp it to 255.
3666 if (cpi->max_target > 255) {
3667 cpi->max_target = 255;
3671 * VMware ESX reports > 16 devices and then dies when we probe.
3673 if (mpt->is_spi && cpi->max_target > 15) {
3674 cpi->max_target = 15;
3679 cpi->max_lun = MPT_MAX_LUNS;
3680 cpi->initiator_id = mpt->mpt_ini_id;
3681 cpi->bus_id = cam_sim_bus(sim);
3684 * The base speed is the speed of the underlying connection.
3686 #ifdef CAM_NEW_TRAN_CODE
3687 cpi->protocol = PROTO_SCSI;
3689 cpi->hba_misc = PIM_NOBUSRESET;
3690 cpi->base_transfer_speed = 100000;
3691 cpi->hba_inquiry = PI_TAG_ABLE;
3692 cpi->transport = XPORT_FC;
3693 cpi->transport_version = 0;
3694 cpi->protocol_version = SCSI_REV_SPC;
3695 } else if (mpt->is_sas) {
3696 cpi->hba_misc = PIM_NOBUSRESET;
3697 cpi->base_transfer_speed = 300000;
3698 cpi->hba_inquiry = PI_TAG_ABLE;
3699 cpi->transport = XPORT_SAS;
3700 cpi->transport_version = 0;
3701 cpi->protocol_version = SCSI_REV_SPC2;
3703 cpi->hba_misc = PIM_SEQSCAN;
3704 cpi->base_transfer_speed = 3300;
3705 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3706 cpi->transport = XPORT_SPI;
3707 cpi->transport_version = 2;
3708 cpi->protocol_version = SCSI_REV_2;
3712 cpi->hba_misc = PIM_NOBUSRESET;
3713 cpi->base_transfer_speed = 100000;
3714 cpi->hba_inquiry = PI_TAG_ABLE;
3715 } else if (mpt->is_sas) {
3716 cpi->hba_misc = PIM_NOBUSRESET;
3717 cpi->base_transfer_speed = 300000;
3718 cpi->hba_inquiry = PI_TAG_ABLE;
3720 cpi->hba_misc = PIM_SEQSCAN;
3721 cpi->base_transfer_speed = 3300;
3722 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3727 * We give our fake RAID passthru bus a width that is MaxVolumes
3728 * wide and restrict it to one lun.
3730 if (raid_passthru) {
3731 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3732 cpi->initiator_id = cpi->max_target + 1;
3736 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3737 cpi->hba_misc |= PIM_NOINITIATOR;
3739 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3741 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3743 cpi->target_sprt = 0;
3745 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3746 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3747 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3748 cpi->unit_number = cam_sim_unit(sim);
3749 cpi->ccb_h.status = CAM_REQ_CMP;
3752 case XPT_EN_LUN: /* Enable LUN as a target */
3756 CAMLOCK_2_MPTLOCK(mpt);
3757 if (ccb->cel.enable)
3758 result = mpt_enable_lun(mpt,
3759 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3761 result = mpt_disable_lun(mpt,
3762 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3763 MPTLOCK_2_CAMLOCK(mpt);
3765 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3767 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3771 case XPT_NOTIFY_ACK: /* recycle notify ack */
3772 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3773 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3775 tgt_resource_t *trtp;
3776 lun_id_t lun = ccb->ccb_h.target_lun;
3777 ccb->ccb_h.sim_priv.entries[0].field = 0;
3778 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3779 ccb->ccb_h.flags = 0;
3781 if (lun == CAM_LUN_WILDCARD) {
3782 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3783 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3786 trtp = &mpt->trt_wildcard;
3787 } else if (lun >= MPT_MAX_LUNS) {
3788 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3791 trtp = &mpt->trt[lun];
3793 CAMLOCK_2_MPTLOCK(mpt);
3794 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3795 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3796 "Put FREE ATIO %p lun %d\n", ccb, lun);
3797 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3799 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3800 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3801 "Put FREE INOT lun %d\n", lun);
3802 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3805 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3807 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3808 MPTLOCK_2_CAMLOCK(mpt);
3811 case XPT_CONT_TARGET_IO:
3812 CAMLOCK_2_MPTLOCK(mpt);
3813 mpt_target_start_io(mpt, ccb);
3814 MPTLOCK_2_CAMLOCK(mpt);
3818 ccb->ccb_h.status = CAM_REQ_INVALID;
3825 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3827 #ifdef CAM_NEW_TRAN_CODE
3828 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3829 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3832 uint32_t dval, pval, oval;
3835 if (IS_CURRENT_SETTINGS(cts) == 0) {
3836 tgt = cts->ccb_h.target_id;
3837 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3838 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3842 tgt = cts->ccb_h.target_id;
3846 * We aren't looking at Port Page 2 BIOS settings here-
3847 * sometimes these have been known to be bogus XXX.
3849 * For user settings, we pick the max from port page 0
3851 * For current settings we read the current settings out from
3852 * device page 0 for that target.
3854 if (IS_CURRENT_SETTINGS(cts)) {
3855 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3858 CAMLOCK_2_MPTLOCK(mpt);
3859 tmp = mpt->mpt_dev_page0[tgt];
3860 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3861 sizeof(tmp), FALSE, 5000);
3863 MPTLOCK_2_CAMLOCK(mpt);
3864 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3867 mpt2host_config_page_scsi_device_0(&tmp);
3869 MPTLOCK_2_CAMLOCK(mpt);
3870 mpt_lprt(mpt, MPT_PRT_DEBUG,
3871 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3872 tmp.NegotiatedParameters, tmp.Information);
3873 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3874 DP_WIDE : DP_NARROW;
3875 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3876 DP_DISC_ENABLE : DP_DISC_DISABL;
3877 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3878 DP_TQING_ENABLE : DP_TQING_DISABL;
3879 oval = tmp.NegotiatedParameters;
3880 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3881 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3882 pval = tmp.NegotiatedParameters;
3883 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3884 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3885 mpt->mpt_dev_page0[tgt] = tmp;
3887 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3888 oval = mpt->mpt_port_page0.Capabilities;
3889 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3890 pval = mpt->mpt_port_page0.Capabilities;
3891 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3894 #ifndef CAM_NEW_TRAN_CODE
3895 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3897 cts->sync_period = pval;
3898 cts->sync_offset = oval;
3899 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3900 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3901 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3902 if (dval & DP_WIDE) {
3903 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3905 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3907 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3908 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3909 if (dval & DP_DISC_ENABLE) {
3910 cts->flags |= CCB_TRANS_DISC_ENB;
3912 if (dval & DP_TQING_ENABLE) {
3913 cts->flags |= CCB_TRANS_TAG_ENB;
3921 spi->sync_offset = oval;
3922 spi->sync_period = pval;
3923 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3924 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3925 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3926 if (dval & DP_WIDE) {
3927 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3929 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3931 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3932 scsi->valid = CTS_SCSI_VALID_TQ;
3933 if (dval & DP_TQING_ENABLE) {
3934 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3936 spi->valid |= CTS_SPI_VALID_DISC;
3937 if (dval & DP_DISC_ENABLE) {
3938 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3942 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3943 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3944 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3949 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3951 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3953 ptr = &mpt->mpt_dev_page1[tgt];
3955 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3957 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3962 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3964 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3966 ptr = &mpt->mpt_dev_page1[tgt];
3967 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3968 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3969 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3970 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3971 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3975 ptr->RequestedParameters |=
3976 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3977 ptr->RequestedParameters |=
3978 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3980 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3983 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3984 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
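/*
 * A sketch of the encoding at work in this routine: the minimum
 * sync period and maximum sync offset are packed into bit fields of
 * RequestedParameters via the MPI_SCSIDEVPAGE1_RP_SHIFT_* constants,
 * while the DT/QAS/IU bits (cleared at the top, re-enabled here
 * under the elided period checks) request the faster SPI transfer
 * variants for settings quick enough to need them.
 */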
3989 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3991 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3994 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3995 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3996 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3997 tmp = mpt->mpt_dev_page1[tgt];
3998 host2mpt_config_page_scsi_device_1(&tmp);
3999 rv = mpt_write_cur_cfg_page(mpt, tgt,
4000 &tmp.Header, sizeof(tmp), FALSE, 5000);
4002 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
4009 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
4011 #if __FreeBSD_version >= 500000
4012 cam_calc_geometry(ccg, extended);
4015 uint32_t secs_per_cylinder;
4017 if (ccg->block_size == 0) {
4018 ccg->ccb_h.status = CAM_REQ_INVALID;
4021 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
4022 if (size_mb > 1024 && extended) {
4024 ccg->secs_per_track = 63;
4027 ccg->secs_per_track = 32;
4029 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
4030 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
4031 ccg->ccb_h.status = CAM_REQ_CMP;
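/*
 * A worked example for the legacy path above, assuming the usual
 * extended translation of 255 heads (set in the elided branch): a
 * 4GB volume of 512-byte blocks has volume_size = 8388608, so
 * size_mb = 4096 > 1024, secs_per_track = 63, secs_per_cylinder =
 * 255 * 63 = 16065, and cylinders = 8388608 / 16065 = 522.
 */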
4035 /****************************** Timeout Recovery ******************************/
4037 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
4041 error = mpt_kthread_create(mpt_recovery_thread, mpt,
4042 &mpt->recovery_thread, /*flags*/0,
4043 /*altstack*/0, "mpt_recovery%d", mpt->unit);
4048 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
4051 if (mpt->recovery_thread == NULL) {
4054 mpt->shutdwn_recovery = 1;
4057 * Sleep on a slightly different location
4058 * for this interlock just for added safety.
4060 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
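/*
 * The shutdown handshake, in brief: we set shutdwn_recovery, wake
 * the thread out of its idle sleep (in the elided code just above),
 * and then sleep here on &mpt->recovery_thread; the thread's exit
 * path NULLs that pointer and wakeup()s the same address to
 * release us.
 */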
4064 mpt_recovery_thread(void *arg)
4066 struct mpt_softc *mpt;
4068 mpt = (struct mpt_softc *)arg;
4071 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4072 if (mpt->shutdwn_recovery == 0) {
4073 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
4076 if (mpt->shutdwn_recovery != 0) {
4079 mpt_recover_commands(mpt);
4081 mpt->recovery_thread = NULL;
4082 wakeup(&mpt->recovery_thread);
4084 mpt_kthread_exit(0);
4088 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4089 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4091 MSG_SCSI_TASK_MGMT *tmf_req;
4095 * Wait for any current TMF request to complete.
4096 * We're only allowed to issue one TMF at a time.
4098 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4099 sleep_ok, MPT_TMF_MAX_TIMEOUT);
4101 mpt_reset(mpt, TRUE);
4105 mpt_assign_serno(mpt, mpt->tmf_req);
4106 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4108 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4109 memset(tmf_req, 0, sizeof(*tmf_req));
4110 tmf_req->TargetID = target;
4111 tmf_req->Bus = channel;
4112 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4113 tmf_req->TaskType = type;
4114 tmf_req->MsgFlags = flags;
4115 tmf_req->MsgContext =
4116 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4117 if (lun > MPT_MAX_LUNS) {
4118 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4119 tmf_req->LUN[1] = lun & 0xff;
4121 tmf_req->LUN[1] = lun;
4123 tmf_req->TaskMsgContext = abort_ctx;
4125 mpt_lprt(mpt, MPT_PRT_DEBUG,
4126 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4127 mpt->tmf_req->serno, tmf_req->MsgContext);
4128 if (mpt->verbose > MPT_PRT_DEBUG) {
4129 mpt_print_request(tmf_req);
4132 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4133 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4134 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4135 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4136 if (error != MPT_OK) {
4137 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4138 mpt->tmf_req->state = REQ_STATE_FREE;
4139 mpt_reset(mpt, TRUE);
4145 * When a command times out, it is placed on the request_timeout_list
4146 * and we wake our recovery thread. The MPT-Fusion architecture supports
4147 * only a single TMF operation at a time, so we serially abort/bdr, etc.,
4148 * the timed-out transactions. The next TMF is issued either by the
4149 * completion handler of the current TMF waking our recovery thread,
4150 * or the TMF timeout handler causing a hard reset sequence.
4153 mpt_recover_commands(struct mpt_softc *mpt)
4159 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4161 * No work to do- leave.
4163 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4168 * Flush any commands whose completion coincides with their timeout.
4172 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4174 * The timed-out commands have already
4175 * completed. This typically means
4176 * that either the timeout value was on
4177 * the hairy edge of what the device
4178 * requires or - more likely - interrupts
4179 * are not happening.
4181 mpt_prt(mpt, "Timedout requests already complete. "
4182 "Interrupts may not be functioning.\n");
4183 mpt_enable_ints(mpt);
4188 * We have no visibility into the current state of the
4189 * controller, so attempt to abort the commands in the
4190 * order they timed-out. For initiator commands, we
4191 * depend on the reply handler pulling requests off
4194 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4197 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4199 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4200 req, req->serno, hdrp->Function);
4203 mpt_prt(mpt, "null ccb in timed out request. "
4204 "Resetting Controller.\n");
4205 mpt_reset(mpt, TRUE);
4208 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4211 * Check to see whether this is an initiator command;
4212 * if it is not, deal with it differently.
4214 switch (hdrp->Function) {
4215 case MPI_FUNCTION_SCSI_IO_REQUEST:
4216 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4220 * XXX: FIX ME: need to abort target assists...
4222 mpt_prt(mpt, "just putting it back on the pend q\n");
4223 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4224 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4229 error = mpt_scsi_send_tmf(mpt,
4230 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4231 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4232 htole32(req->index | scsi_io_handler_id), TRUE);
4236 * mpt_scsi_send_tmf hard resets on failure, so no
4237 * need to do so here. Our queue should be emptied
4238 * by the hard reset.
4243 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4244 REQ_STATE_DONE, TRUE, 500);
4246 status = le16toh(mpt->tmf_req->IOCStatus);
4247 response = mpt->tmf_req->ResponseCode;
4248 mpt->tmf_req->state = REQ_STATE_FREE;
4252 * If we've errored out, reset the controller.
4254 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4255 "Resetting controller\n");
4256 mpt_reset(mpt, TRUE);
4260 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4261 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4262 "Resetting controller.\n", status);
4263 mpt_reset(mpt, TRUE);
4267 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4268 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4269 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4270 "Resetting controller.\n", response);
4271 mpt_reset(mpt, TRUE);
4274 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4278 /************************ Target Mode Support ****************************/
4280 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4282 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4283 PTR_SGE_TRANSACTION32 tep;
4284 PTR_SGE_SIMPLE32 se;
4288 paddr = req->req_pbuf;
4289 paddr += MPT_RQSL(mpt);
4292 memset(fc, 0, MPT_REQUEST_AREA);
4293 fc->BufferCount = 1;
4294 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4295 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4298 * Okay, set up ELS buffer pointers: they consist of a TE SGL
4299 * element (with details length of zero) followed by a SIMPLE
4300 * SGL element which holds the address of the ELS buffer.
4304 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4306 tep->ContextSize = 4;
4308 tep->TransactionContext[0] = htole32(ioindex);
4310 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4312 MPI_SGE_FLAGS_HOST_TO_IOC |
4313 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4314 MPI_SGE_FLAGS_LAST_ELEMENT |
4315 MPI_SGE_FLAGS_END_OF_LIST |
4316 MPI_SGE_FLAGS_END_OF_BUFFER;
4317 fl <<= MPI_SGE_FLAGS_SHIFT;
4318 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4319 se->FlagsLength = htole32(fl);
4320 se->Address = htole32((uint32_t) paddr);
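/*
 * Layout sketch for the request area used by these posts: the first
 * MPT_RQSL(mpt) bytes hold the BUFFER_POST request itself, and the
 * remainder (MPT_NRFM - MPT_RQSL bytes, the length encoded in the
 * SGE above) is the receive buffer the IOC will DMA an inbound ELS
 * frame into, tagged with ioindex via the TransactionContext.
 */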
4321 mpt_lprt(mpt, MPT_PRT_DEBUG,
4322 "add ELS index %d ioindex %d for %p:%u\n",
4323 req->index, ioindex, req, req->serno);
4324 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4325 ("mpt_fc_post_els: request not locked"));
4326 mpt_send_cmd(mpt, req);
4330 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4332 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4333 PTR_CMD_BUFFER_DESCRIPTOR cb;
4336 paddr = req->req_pbuf;
4337 paddr += MPT_RQSL(mpt);
4338 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4339 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4342 fc->BufferCount = 1;
4343 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4344 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4346 cb = &fc->Buffer[0];
4347 cb->IoIndex = htole16(ioindex);
4348 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4350 mpt_check_doorbell(mpt);
4351 mpt_send_cmd(mpt, req);
4355 mpt_add_els_buffers(struct mpt_softc *mpt)
4359 if (mpt->is_fc == 0) {
4363 if (mpt->els_cmds_allocated) {
4367 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4368 M_DEVBUF, M_NOWAIT | M_ZERO);
4370 if (mpt->els_cmd_ptrs == NULL) {
4375 * Feed the chip some ELS buffer resources
4377 for (i = 0; i < MPT_MAX_ELS; i++) {
4378 request_t *req = mpt_get_request(mpt, FALSE);
4382 req->state |= REQ_STATE_LOCKED;
4383 mpt->els_cmd_ptrs[i] = req;
4384 mpt_fc_post_els(mpt, req, i);
4388 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4389 free(mpt->els_cmd_ptrs, M_DEVBUF);
4390 mpt->els_cmd_ptrs = NULL;
4393 if (i != MPT_MAX_ELS) {
4394 mpt_lprt(mpt, MPT_PRT_INFO,
4395 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4397 mpt->els_cmds_allocated = i;
4402 mpt_add_target_commands(struct mpt_softc *mpt)
4406 if (mpt->tgt_cmd_ptrs) {
4410 max = MPT_MAX_REQUESTS(mpt) >> 1;
4411 if (max > mpt->mpt_max_tgtcmds) {
4412 max = mpt->mpt_max_tgtcmds;
4415 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4416 if (mpt->tgt_cmd_ptrs == NULL) {
4418 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4422 for (i = 0; i < max; i++) {
4425 req = mpt_get_request(mpt, FALSE);
4429 req->state |= REQ_STATE_LOCKED;
4430 mpt->tgt_cmd_ptrs[i] = req;
4431 mpt_post_target_command(mpt, req, i);
4436 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4437 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4438 mpt->tgt_cmd_ptrs = NULL;
4442 mpt->tgt_cmds_allocated = i;
4445 mpt_lprt(mpt, MPT_PRT_INFO,
4446 "added %d of %d target bufs\n", i, max);
4452 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4455 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4457 } else if (lun >= MPT_MAX_LUNS) {
4459 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4462 if (mpt->tenabled == 0) {
4464 (void) mpt_fc_reset_link(mpt, 0);
4468 if (lun == CAM_LUN_WILDCARD) {
4469 mpt->trt_wildcard.enabled = 1;
4471 mpt->trt[lun].enabled = 1;
4477 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4481 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4483 } else if (lun >= MPT_MAX_LUNS) {
4485 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4488 if (lun == CAM_LUN_WILDCARD) {
4489 mpt->trt_wildcard.enabled = 0;
4491 mpt->trt[lun].enabled = 0;
4493 for (i = 0; i < MPT_MAX_LUNS; i++) {
4494 if (mpt->trt[i].enabled) {
4498 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4500 (void) mpt_fc_reset_link(mpt, 0);

/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
        struct ccb_scsiio *csio = &ccb->csio;
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

        switch (tgt->state) {
        case TGT_STATE_IN_CAM:
                break;
        case TGT_STATE_MOVING_DATA:
                mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
                xpt_freeze_simq(mpt->sim, 1);
                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                MPTLOCK_2_CAMLOCK(mpt);
                xpt_done(ccb);
                CAMLOCK_2_MPTLOCK(mpt);
                return;
        default:
                mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
                    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
                mpt_tgt_dump_req_state(mpt, cmd_req);
                mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
                MPTLOCK_2_CAMLOCK(mpt);
                xpt_done(ccb);
                CAMLOCK_2_MPTLOCK(mpt);
                return;
        }

        if (csio->dxfer_len) {
                bus_dmamap_callback_t *cb;
                PTR_MSG_TARGET_ASSIST_REQUEST ta;
                request_t *req;

                KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
                    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));

                if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
                        if (mpt->outofbeer == 0) {
                                mpt->outofbeer = 1;
                                xpt_freeze_simq(mpt->sim, 1);
                                mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
                        }
                        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
                        MPTLOCK_2_CAMLOCK(mpt);
                        xpt_done(ccb);
                        CAMLOCK_2_MPTLOCK(mpt);
                        return;
                }
                ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
                if (sizeof (bus_addr_t) > 4) {
                        cb = mpt_execute_req_a64;
                } else {
                        cb = mpt_execute_req;
                }

                req->ccb = ccb;
                ccb->ccb_h.ccb_req_ptr = req;

                /*
                 * Record the currently active ccb and the
                 * request for it in our target state area.
                 */
                tgt->ccb = ccb;
                tgt->req = req;

                memset(req->req_vbuf, 0, MPT_RQSL(mpt));
                ta = req->req_vbuf;

                if (mpt->is_sas) {
                        PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
                            cmd_req->req_vbuf;
                        ta->QueueTag = ssp->InitiatorTag;
                } else if (mpt->is_spi) {
                        PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
                            cmd_req->req_vbuf;
                        ta->QueueTag = sp->Tag;
                }
                ta->Function = MPI_FUNCTION_TARGET_ASSIST;
                ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
                ta->ReplyWord = htole32(tgt->reply_desc);
                if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
                        ta->LUN[0] =
                            0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
                        ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
                } else {
                        ta->LUN[1] = csio->ccb_h.target_lun;
                }

                ta->RelativeOffset = tgt->bytes_xfered;
                ta->DataLength = ccb->csio.dxfer_len;
                if (ta->DataLength > tgt->resid) {
                        ta->DataLength = tgt->resid;
                }

                /*
                 * XXX Should be done after data transfer completes?
                 */
                tgt->resid -= csio->dxfer_len;
                tgt->bytes_xfered += csio->dxfer_len;

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        ta->TargetAssistFlags |=
                            TARGET_ASSIST_FLAGS_DATA_DIRECTION;
                }

#ifdef  WE_TRUST_AUTO_GOOD_STATUS
                if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
                    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
                        ta->TargetAssistFlags |=
                            TARGET_ASSIST_FLAGS_AUTO_STATUS;
                }
#endif
                tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

                mpt_lprt(mpt, MPT_PRT_DEBUG,
                    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
                    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
                    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

                MPTLOCK_2_CAMLOCK(mpt);
                if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
                        if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
                                int error;
                                int s = splsoftvm();
                                error = bus_dmamap_load(mpt->buffer_dmat,
                                    req->dmap, csio->data_ptr, csio->dxfer_len,
                                    cb, req, 0);
                                splx(s);
                                if (error == EINPROGRESS) {
                                        xpt_freeze_simq(mpt->sim, 1);
                                        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                                }
                        } else {
                                /*
                                 * We have been given a pointer to a single
                                 * physical buffer.
                                 */
                                struct bus_dma_segment seg;
                                seg.ds_addr = (bus_addr_t)
                                    (vm_offset_t)csio->data_ptr;
                                seg.ds_len = csio->dxfer_len;
                                (*cb)(req, &seg, 1, 0);
                        }
                } else {
                        /*
                         * We have been given a list of addresses.
                         * This case could be easily supported but they are not
                         * currently generated by the CAM subsystem so there
                         * is no point in wasting the time right now.
                         */
                        struct bus_dma_segment *sgs;
                        if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
                                (*cb)(req, NULL, 0, EFAULT);
                        } else {
                                /* Just use the segments provided */
                                sgs = (struct bus_dma_segment *)csio->data_ptr;
                                (*cb)(req, sgs, csio->sglist_cnt, 0);
                        }
                }
                CAMLOCK_2_MPTLOCK(mpt);
        } else {
                uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

                /*
                 * XXX: I don't know why this seems to happen, but
                 * XXX: completing the CCB seems to make things happy.
                 * XXX: This seems to happen if the initiator requests
                 * XXX: enough data that we have to do multiple CTIOs.
                 */
                if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
                        mpt_lprt(mpt, MPT_PRT_DEBUG,
                            "Meaningless STATUS CCB (%p): flags %x status %x "
                            "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
                            ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
                        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
                        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                        MPTLOCK_2_CAMLOCK(mpt);
                        xpt_done(ccb);
                        CAMLOCK_2_MPTLOCK(mpt);
                        return;
                }
                if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
                        sp = sense;
                        memcpy(sp, &csio->sense_data,
                            min(csio->sense_len, MPT_SENSE_SIZE));
                }
                mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
        }
}
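
/*
 * Worked example of the residual bookkeeping above (illustrative
 * numbers, not from the original source): an initiator sends a READ
 * with FcpDl = 64KB but the peripheral driver moves it as two 32KB
 * CTIOs. The first TARGET_ASSIST goes out with RelativeOffset 0 and
 * DataLength 32KB, leaving resid = 32KB and bytes_xfered = 32KB; the
 * second goes out with RelativeOffset 32KB, after which resid reaches
 * zero and the status phase can follow.
 */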

static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
        mpt_tgt_state_t *tgt;
        PTR_MSG_TARGET_ASSIST_REQUEST ta;
        request_t *req;
        SGE_SIMPLE32 *se;
        uint32_t flags;
        uint8_t *dptr;
        bus_addr_t pptr;

        /*
         * We enter with resid set to the data load for the command.
         */
        tgt = MPT_TGT_STATE(mpt, cmd_req);
        if (length == 0 || tgt->resid == 0) {
                tgt->resid = 0;
                mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
                return;
        }

        if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
                mpt_prt(mpt, "out of resources- dropping local response\n");
                return;
        }
        tgt->is_local = 1;

        memset(req->req_vbuf, 0, MPT_RQSL(mpt));
        ta = req->req_vbuf;

        if (mpt->is_sas) {
                PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
                ta->QueueTag = ssp->InitiatorTag;
        } else if (mpt->is_spi) {
                PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
                ta->QueueTag = sp->Tag;
        }
        ta->Function = MPI_FUNCTION_TARGET_ASSIST;
        ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
        ta->ReplyWord = htole32(tgt->reply_desc);
        if (lun > MPT_MAX_LUNS) {
                ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
                ta->LUN[1] = lun & 0xff;
        } else {
                ta->LUN[1] = lun;
        }
        ta->RelativeOffset = 0;
        ta->DataLength = length;

        dptr = req->req_vbuf;
        dptr += MPT_RQSL(mpt);
        pptr = req->req_pbuf;
        pptr += MPT_RQSL(mpt);
        memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

        se = (SGE_SIMPLE32 *) &ta->SGL[0];
        memset(se, 0, sizeof (*se));
        se->Address = htole32((U32) pptr);
        flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
        if (send) {
                ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
                flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
        MPI_pSGE_SET_LENGTH(se, length);
        flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
        flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
        MPI_pSGE_SET_FLAGS(se, flags);

        tgt->ccb = NULL;
        tgt->req = req;
        tgt->resid -= length;
        tgt->bytes_xfered = length;
#ifdef  WE_TRUST_AUTO_GOOD_STATUS
        tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
        mpt_send_cmd(mpt, req);
}
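
/*
 * The local-response path above can only move what fits in the second
 * half of the request area: the memcpy clamps at MPT_RQSL(mpt) bytes,
 * and a single SGE_SIMPLE32 describes the whole transfer. That is fine
 * for its only callers here (the synthesized INQUIRY and REQUEST SENSE
 * responses in mpt_scsi_tgt_atio()), which are all short.
 */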

/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
        struct mpt_hdr_stailq *lp;
        struct ccb_hdr *srch;
        int found = 0;
        union ccb *accb = ccb->cab.abort_ccb;
        tgt_resource_t *trtp;

        mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

        if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
                trtp = &mpt->trt_wildcard;
        } else {
                trtp = &mpt->trt[ccb->ccb_h.target_lun];
        }

        if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
                lp = &trtp->atios;
        } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
                lp = &trtp->inots;
        } else {
                return (CAM_REQ_INVALID);
        }

        STAILQ_FOREACH(srch, lp, sim_links.stqe) {
                if (srch == &accb->ccb_h) {
                        found = 1;
                        STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
                        break;
                }
        }
        if (found) {
                accb->ccb_h.status = CAM_REQ_ABORTED;
                xpt_done(accb);
                return (CAM_REQ_CMP);
        }
        mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
        return (CAM_PATH_INVALID);
}

/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
        int error;
        request_t *req;
        PTR_MSG_TARGET_MODE_ABORT abtp;

        req = mpt_get_request(mpt, FALSE);
        if (req == NULL) {
                return (-1);
        }
        abtp = req->req_vbuf;
        memset(abtp, 0, sizeof (*abtp));

        abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
        abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
        abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
        abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);

        error = 0;
        if (mpt->is_fc || mpt->is_sas) {
                mpt_send_cmd(mpt, req);
        } else {
                error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
        }
        return (error);
}
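
/*
 * The split above reflects the two ways requests can reach the IOC (an
 * inference from the two calls): the FC and SAS parts take message
 * frames through the normal fast-post path (mpt_send_cmd), while the
 * handshake path pushes the message through the doorbell register a
 * word at a time, the slow but always-available channel.
 */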

/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
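
/*
 * Neither macro appears to be defined here; they gate the AUTO_STATUS
 * path in mpt_target_start_io() above and the AUTO_REPOST and FC_RSP
 * construction paths in mpt_scsi_tgt_status() below.
 */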

static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
        uint8_t *cmd_vbuf;
        mpt_tgt_state_t *tgt;
        PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
        request_t *req;
        bus_addr_t paddr;
        int resplen = 0;
        uint32_t fl;

        cmd_vbuf = cmd_req->req_vbuf;
        cmd_vbuf += MPT_RQSL(mpt);
        tgt = MPT_TGT_STATE(mpt, cmd_req);

        if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
                if (mpt->outofbeer == 0) {
                        mpt->outofbeer = 1;
                        xpt_freeze_simq(mpt->sim, 1);
                        mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
                }
                if (ccb) {
                        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
                        MPTLOCK_2_CAMLOCK(mpt);
                        xpt_done(ccb);
                        CAMLOCK_2_MPTLOCK(mpt);
                } else {
                        mpt_prt(mpt,
                            "could not allocate status request- dropping\n");
                }
                return;
        }
        req->ccb = ccb;
        if (ccb) {
                ccb->ccb_h.ccb_mpt_ptr = mpt;
                ccb->ccb_h.ccb_req_ptr = req;
        }

        /*
         * Record the currently active ccb, if any, and the
         * request for it in our target state area.
         */
        tgt->ccb = ccb;
        tgt->req = req;
        tgt->state = TGT_STATE_SENDING_STATUS;

        tp = req->req_vbuf;
        paddr = req->req_pbuf;
        paddr += MPT_RQSL(mpt);

        memset(tp, 0, sizeof (*tp));
        tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
        if (mpt->is_fc) {
                PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
                    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
                uint8_t *sts_vbuf;
                uint32_t *rsp;

                sts_vbuf = req->req_vbuf;
                sts_vbuf += MPT_RQSL(mpt);
                rsp = (uint32_t *) sts_vbuf;
                memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

                /*
                 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
                 * It has to be big-endian in memory and is organized
                 * in 32 bit words, which are much easier to deal with
                 * as host-order words that are swizzled as needed.
                 *
                 * All we're filling here is the FC_RSP payload.
                 * We may just have the chip synthesize it if
                 * we have no residual and an OK status.
                 */
                memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

                rsp[2] = status;
                if (tgt->resid) {
                        rsp[2] |= 0x800;        /* XXXX NEED MNEMONIC!!!! */
                        rsp[3] = htobe32(tgt->resid);
#ifdef  WE_TRUST_AUTO_GOOD_STATUS
                        resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
                }
                if (status == SCSI_STATUS_CHECK_COND) {
                        int i;

                        rsp[2] |= 0x200;        /* XXXX NEED MNEMONIC!!!! */
                        rsp[4] = htobe32(MPT_SENSE_SIZE);
                        if (sense_data) {
                                memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
                        } else {
                                mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
                                    "TION but no sense data?\n");
                                memset(&rsp[8], 0, MPT_SENSE_SIZE);
                        }
                        for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
                                rsp[i] = htobe32(rsp[i]);
                        }
#ifdef  WE_TRUST_AUTO_GOOD_STATUS
                        resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
                }
#ifndef WE_TRUST_AUTO_GOOD_STATUS
                resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
                rsp[2] = htobe32(rsp[2]);
        } else if (mpt->is_sas) {
                PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
                    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
                memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
        } else {
                PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
                    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
                tp->StatusCode = status;
                tp->QueueTag = htole16(sp->Tag);
                memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
        }

        tp->ReplyWord = htole32(tgt->reply_desc);
        tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef  WE_CAN_USE_AUTO_REPOST
        tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
        if (status == SCSI_STATUS_OK && resplen == 0) {
                tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
        } else {
                tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
                fl =
                    MPI_SGE_FLAGS_HOST_TO_IOC |
                    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI_SGE_FLAGS_LAST_ELEMENT |
                    MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
                fl <<= MPI_SGE_FLAGS_SHIFT;
                fl |= resplen;
                tp->StatusDataSGE.FlagsLength = htole32(fl);
        }

        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
            ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
            req->serno, tgt->resid);
        if (ccb) {
                ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
                mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
        }
        mpt_send_cmd(mpt, req);
}
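
/*
 * For reference (standard FCP_RSP terms; the driver never named these,
 * hence the "NEED MNEMONIC" tags above): word 2 of the response payload
 * carries the SCSI status in its low byte plus the rsp_flags byte,
 * where 0x800 corresponds to FCP_RESID_UNDER and 0x200 to
 * FCP_SNS_LEN_VALID; word 3 is FCP_RESID and word 4 is FCP_SNS_LEN,
 * with the sense bytes following at word 8.
 */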

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
        struct ccb_immed_notify *inot;
        mpt_tgt_state_t *tgt;

        tgt = MPT_TGT_STATE(mpt, req);
        inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
        if (inot == NULL) {
                mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
                mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
                return;
        }
        STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
        mpt_lprt(mpt, MPT_PRT_DEBUG1,
            "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

        memset(&inot->sense_data, 0, sizeof (inot->sense_data));
        inot->sense_len = 0;
        memset(inot->message_args, 0, sizeof (inot->message_args));
        inot->initiator_id = init_id;   /* XXX */

        /*
         * This is a somewhat grotesque attempt to map from task management
         * to old style SCSI messages. God help us all.
         */
        switch (fc) {
        case MPT_ABORT_TASK_SET:
                inot->message_args[0] = MSG_ABORT_TAG;
                break;
        case MPT_CLEAR_TASK_SET:
                inot->message_args[0] = MSG_CLEAR_TASK_SET;
                break;
        case MPT_TARGET_RESET:
                inot->message_args[0] = MSG_TARGET_RESET;
                break;
        case MPT_CLEAR_ACA:
                inot->message_args[0] = MSG_CLEAR_ACA;
                break;
        case MPT_TERMINATE_TASK:
                inot->message_args[0] = MSG_ABORT_TAG;
                break;
        default:
                inot->message_args[0] = MSG_NOOP;
                break;
        }

        tgt->ccb = (union ccb *) inot;
        inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_done((union ccb *)inot);
        CAMLOCK_2_MPTLOCK(mpt);
}

static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
        static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
            0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
             'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
             'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
             'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
             '0',  '0',  '0',  '1'
        };
        struct ccb_accept_tio *atiop;
        lun_id_t lun;
        int tag_action = 0;
        mpt_tgt_state_t *tgt;
        tgt_resource_t *trtp = NULL;
        U8 *lunptr;
        U8 *vbuf;
        U16 itag;
        U16 ioindex;
        mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
        uint8_t *cdbp;

        /*
         * Stash info for the current command where we can get at it later.
         */
        vbuf = req->req_vbuf;
        vbuf += MPT_RQSL(mpt);

        /*
         * Get our state pointer set up.
         */
        tgt = MPT_TGT_STATE(mpt, req);
        if (tgt->state != TGT_STATE_LOADED) {
                mpt_tgt_dump_req_state(mpt, req);
                panic("bad target state in mpt_scsi_tgt_atio");
        }
        memset(tgt, 0, sizeof (mpt_tgt_state_t));
        tgt->state = TGT_STATE_IN_CAM;
        tgt->reply_desc = reply_desc;
        ioindex = GET_IO_INDEX(reply_desc);
        if (mpt->verbose >= MPT_PRT_DEBUG) {
                mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
                    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
                    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
                    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
        }
        if (mpt->is_fc) {
                PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
                fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
                if (fc->FcpCntl[2]) {
                        /*
                         * Task Management Request
                         */
                        switch (fc->FcpCntl[2]) {
                        case 0x2:
                                fct = MPT_ABORT_TASK_SET;
                                break;
                        case 0x4:
                                fct = MPT_CLEAR_TASK_SET;
                                break;
                        case 0x20:
                                fct = MPT_TARGET_RESET;
                                break;
                        case 0x40:
                                fct = MPT_CLEAR_ACA;
                                break;
                        case 0x80:
                                fct = MPT_TERMINATE_TASK;
                                break;
                        default:
                                mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
                                    fc->FcpCntl[2]);
                                mpt_scsi_tgt_status(mpt, 0, req,
                                    SCSI_STATUS_OK, 0);
                                return;
                        }
                } else {
                        switch (fc->FcpCntl[1]) {
                        case 0:
                                tag_action = MSG_SIMPLE_Q_TAG;
                                break;
                        case 1:
                                tag_action = MSG_HEAD_OF_Q_TAG;
                                break;
                        case 2:
                                tag_action = MSG_ORDERED_Q_TAG;
                                break;
                        default:
                                /*
                                 * Bah. Ignore Untagged Queuing and ACA
                                 */
                                tag_action = MSG_SIMPLE_Q_TAG;
                                break;
                        }
                }
                tgt->resid = be32toh(fc->FcpDl);
                cdbp = fc->FcpCdb;
                lunptr = fc->FcpLun;
                itag = be16toh(fc->OptionalOxid);
        } else if (mpt->is_sas) {
                PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
                ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
                cdbp = ssp->CDB;
                lunptr = ssp->LogicalUnitNumber;
                itag = ssp->InitiatorTag;
        } else {
                PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
                sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
                cdbp = sp->CDB;
                lunptr = sp->LogicalUnitNumber;
                itag = sp->Tag;
        }

        /*
         * Generate a simple lun
         */
        switch (lunptr[0] & 0xc0) {
        case 0x40:
                lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
                break;
        case 0:
                lun = lunptr[1];
                break;
        default:
                mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
                lun = 0xffff;
                break;
        }
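
        /*
         * The decode above handles the two single-level LUN address
         * methods from SAM-2: 0x40 in the top bits selects flat space
         * addressing, where the LUN is the low 14 bits of the first two
         * bytes (bytes 0x40 0x05 decode to lun 5, just as peripheral
         * addressing's 0x00 0x05 does); anything else (logical unit or
         * extended addressing) is punted.
         */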

        /*
         * Deal with non-enabled or bad luns here.
         */
        if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
            mpt->trt[lun].enabled == 0) {
                if (mpt->twildcard) {
                        trtp = &mpt->trt_wildcard;
                } else if (fct == MPT_NIL_TMT_VALUE) {
                        /*
                         * In this case, we haven't got an upstream listener
                         * for either a specific lun or wildcard luns. We
                         * have to make some sensible response. For regular
                         * inquiry, just return some NOT HERE inquiry data.
                         * For VPD inquiry, report illegal field in cdb.
                         * For REQUEST SENSE, just return NO SENSE data.
                         * REPORT LUNS gets illegal command.
                         * All other commands get 'no such device'.
                         */
                        uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
                        size_t len;

                        memset(buf, 0, MPT_SENSE_SIZE);
                        cond = SCSI_STATUS_CHECK_COND;
                        buf[0] = 0xf0;
                        buf[2] = 0x5;
                        buf[7] = 0x8;
                        sp = buf;
                        tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

                        switch (cdbp[0]) {
                        case INQUIRY:
                        {
                                if (cdbp[1] != 0) {
                                        buf[12] = 0x26;
                                        buf[13] = 0x01;
                                        break;
                                }
                                len = min(tgt->resid, cdbp[4]);
                                len = min(len, sizeof (null_iqd));
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "local inquiry %ld bytes\n", (long) len);
                                mpt_scsi_tgt_local(mpt, req, lun, 1,
                                    null_iqd, len);
                                return;
                        }
                        case REQUEST_SENSE:
                        {
                                buf[2] = 0x0;
                                len = min(tgt->resid, cdbp[4]);
                                len = min(len, sizeof (buf));
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "local reqsense %ld bytes\n", (long) len);
                                mpt_scsi_tgt_local(mpt, req, lun, 1,
                                    buf, len);
                                return;
                        }
                        case REPORT_LUNS:
                                mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
                                buf[12] = 0x26;
                                return;
                        default:
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "CMD 0x%x to unmanaged lun %u\n",
                                    cdbp[0], lun);
                                buf[12] = 0x25;
                                break;
                        }
                        mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
                        return;
                }
                /* otherwise, leave trtp NULL */
        } else {
                trtp = &mpt->trt[lun];
        }

        /*
         * Deal with any task management
         */
        if (fct != MPT_NIL_TMT_VALUE) {
                if (trtp == NULL) {
                        mpt_prt(mpt, "task mgmt function %x but no listener\n",
                            fct);
                        mpt_scsi_tgt_status(mpt, 0, req,
                            SCSI_STATUS_OK, 0);
                } else {
                        mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
                            GET_INITIATOR_INDEX(reply_desc));
                }
                return;
        }

        atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
        if (atiop == NULL) {
                mpt_lprt(mpt, MPT_PRT_WARN,
                    "no ATIOs for lun %u- sending back %s\n", lun,
                    mpt->tenabled? "QUEUE FULL" : "BUSY");
                mpt_scsi_tgt_status(mpt, NULL, req,
                    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
                    NULL);
                return;
        }
        STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
        mpt_lprt(mpt, MPT_PRT_DEBUG1,
            "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
        atiop->ccb_h.ccb_mpt_ptr = mpt;
        atiop->ccb_h.status = CAM_CDB_RECVD;
        atiop->ccb_h.target_lun = lun;
        atiop->sense_len = 0;
        atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
        atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
        memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

        /*
         * The tag we construct here allows us to find the
         * original request that the command came in with.
         *
         * This way we don't have to depend on anything but the
         * tag to find things when CCBs show back up from CAM.
         */
        atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
        tgt->tag_id = atiop->tag_id;
        if (tag_action) {
                atiop->tag_action = tag_action;
                atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
        }
        if (mpt->verbose >= MPT_PRT_DEBUG) {
                int i;
                mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
                    atiop->ccb_h.target_lun);
                for (i = 0; i < atiop->cdb_len; i++) {
                        mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
                            (i == (atiop->cdb_len - 1))? '>' : ' ');
                }
                mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
                    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
        }

        MPTLOCK_2_CAMLOCK(mpt);
        xpt_done((union ccb *)atiop);
        CAMLOCK_2_MPTLOCK(mpt);
}
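
/*
 * The tag round-trip works like this (inferred from the macros used in
 * this file): MPT_MAKE_TAGID() folds the request index and IO index
 * into atiop->tag_id, and when the peripheral driver later hands us a
 * CTIO carrying that tag, MPT_TAG_2_REQ() in mpt_target_start_io()
 * recovers the original command buffer request without any table
 * lookups.
 */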

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

        mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
            "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
            tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
            tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

        mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
            req->index, req->index, req->state);
        mpt_tgt_dump_tgt_state(mpt, req);
}

static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        int dbg;
        union ccb *ccb;
        U16 status;

        if (reply_frame == NULL) {
                /*
                 * Figure out what the state of the command is.
                 */
                mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef  INVARIANTS
                mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
                if (tgt->req) {
                        mpt_req_not_spcl(mpt, tgt->req,
                            "turbo scsi_tgt_reply associated req", __LINE__);
                }
#endif
                switch (tgt->state) {
                case TGT_STATE_LOADED:
                        /*
                         * This is a new command starting.
                         */
                        mpt_scsi_tgt_atio(mpt, req, reply_desc);
                        break;
                case TGT_STATE_MOVING_DATA:
                {
                        uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

                        ccb = tgt->ccb;
                        if (tgt->req == NULL) {
                                panic("mpt: turbo target reply with null "
                                    "associated request moving data");
                                /* NOTREACHED */
                        }
                        if (ccb == NULL) {
                                if (tgt->is_local == 0) {
                                        panic("mpt: turbo target reply with "
                                            "null associated ccb moving data");
                                        /* NOTREACHED */
                                }
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "TARGET_ASSIST local done\n");
                                TAILQ_REMOVE(&mpt->request_pending_list,
                                    tgt->req, links);
                                mpt_free_request(mpt, tgt->req);
                                tgt->req = NULL;
                                mpt_scsi_tgt_status(mpt, NULL, req,
                                    0, NULL);
                                return (TRUE);
                        }
                        tgt->ccb = NULL;
                        tgt->nxfers++;
                        mpt_req_untimeout(req, mpt_timeout, ccb);
                        mpt_lprt(mpt, MPT_PRT_DEBUG,
                            "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
                            ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
                        /*
                         * Free the Target Assist Request
                         */
                        KASSERT(tgt->req->ccb == ccb,
                            ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
                            tgt->req->serno, tgt->req->ccb));
                        TAILQ_REMOVE(&mpt->request_pending_list,
                            tgt->req, links);
                        mpt_free_request(mpt, tgt->req);
                        tgt->req = NULL;

                        /*
                         * Do we need to send status now? That is, are
                         * we done with all our data transfers?
                         */
                        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
                                mpt_set_ccb_status(ccb, CAM_REQ_CMP);
                                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                                KASSERT(ccb->ccb_h.status,
                                    ("zero ccb sts at %d\n", __LINE__));
                                tgt->state = TGT_STATE_IN_CAM;
                                if (mpt->outofbeer) {
                                        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                                        mpt->outofbeer = 0;
                                        mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
                                }
                                MPTLOCK_2_CAMLOCK(mpt);
                                xpt_done(ccb);
                                CAMLOCK_2_MPTLOCK(mpt);
                                break;
                        }
                        /*
                         * Otherwise, send status (and sense)
                         */
                        if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
                                sp = sense;
                                memcpy(sp, &ccb->csio.sense_data,
                                    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
                        }
                        mpt_scsi_tgt_status(mpt, ccb, req,
                            ccb->csio.scsi_status, sp);
                        break;
                }
                case TGT_STATE_SENDING_STATUS:
                case TGT_STATE_MOVING_DATA_AND_STATUS:
                {
                        int ioindex;
                        ccb = tgt->ccb;

                        if (tgt->req == NULL) {
                                panic("mpt: turbo target reply with null "
                                    "associated request sending status");
                                /* NOTREACHED */
                        }

                        if (ccb) {
                                tgt->ccb = NULL;
                                if (tgt->state ==
                                    TGT_STATE_MOVING_DATA_AND_STATUS) {
                                        tgt->nxfers++;
                                }
                                mpt_req_untimeout(req, mpt_timeout, ccb);
                                if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
                                        ccb->ccb_h.status |= CAM_SENT_SENSE;
                                }
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "TARGET_STATUS tag %x sts %x flgs %x req "
                                    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
                                    ccb->ccb_h.flags, tgt->req);
                                /*
                                 * Free the Target Send Status Request
                                 */
                                KASSERT(tgt->req->ccb == ccb,
                                    ("tgt->req %p:%u tgt->req->ccb %p",
                                    tgt->req, tgt->req->serno, tgt->req->ccb));
                                /*
                                 * Notify CAM that we're done
                                 */
                                mpt_set_ccb_status(ccb, CAM_REQ_CMP);
                                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                                KASSERT(ccb->ccb_h.status,
                                    ("ZERO ccb sts at %d\n", __LINE__));
                        } else {
                                mpt_lprt(mpt, MPT_PRT_DEBUG,
                                    "TARGET_STATUS non-CAM for req %p:%u\n",
                                    tgt->req, tgt->req->serno);
                        }
                        TAILQ_REMOVE(&mpt->request_pending_list,
                            tgt->req, links);
                        mpt_free_request(mpt, tgt->req);
                        tgt->req = NULL;

                        /*
                         * And re-post the Command Buffer.
                         * This will reset the state.
                         */
                        ioindex = GET_IO_INDEX(reply_desc);
                        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                        mpt_post_target_command(mpt, req, ioindex);

                        /*
                         * And post a done for anyone who cares
                         */
                        if (ccb) {
                                if (mpt->outofbeer) {
                                        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
                                        mpt->outofbeer = 0;
                                        mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
                                }
                                MPTLOCK_2_CAMLOCK(mpt);
                                xpt_done(ccb);
                                CAMLOCK_2_MPTLOCK(mpt);
                        }
                        break;
                }
                case TGT_STATE_NIL:     /* XXX This Never Happens XXX */
                        tgt->state = TGT_STATE_LOADED;
                        break;
                default:
                        mpt_prt(mpt, "Unknown Target State 0x%x in Context "
                            "Reply Function\n", tgt->state);
                }
                return (TRUE);
        }

        status = le16toh(reply_frame->IOCStatus);
        if (status != MPI_IOCSTATUS_SUCCESS) {
                dbg = MPT_PRT_ERROR;
        } else {
                dbg = MPT_PRT_DEBUG1;
        }

        mpt_lprt(mpt, dbg,
            "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
            req, req->serno, reply_frame, reply_frame->Function, status);

        switch (reply_frame->Function) {
        case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
        {
                mpt_tgt_state_t *tgt;
#ifdef  INVARIANTS
                mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
                if (status != MPI_IOCSTATUS_SUCCESS) {
                        return (TRUE);
                }
                tgt = MPT_TGT_STATE(mpt, req);
                KASSERT(tgt->state == TGT_STATE_LOADING,
                    ("bad state 0x%x on reply to buffer post\n", tgt->state));
                mpt_assign_serno(mpt, req);
                tgt->state = TGT_STATE_LOADED;
                break;
        }
        case MPI_FUNCTION_TARGET_ASSIST:
#ifdef  INVARIANTS
                mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
                mpt_prt(mpt, "target assist completion\n");
                TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                mpt_free_request(mpt, req);
                break;
        case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef  INVARIANTS
                mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
                mpt_prt(mpt, "status send completion\n");
                TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                mpt_free_request(mpt, req);
                break;
        case MPI_FUNCTION_TARGET_MODE_ABORT:
        {
                PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
                    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
                PTR_MSG_TARGET_MODE_ABORT abtp =
                    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
                uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef  INVARIANTS
                mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
                mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
                    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
                TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                mpt_free_request(mpt, req);
                break;
        }
        default:
                mpt_prt(mpt, "Unknown Target Address Reply Function code: "
                    "0x%x\n", reply_frame->Function);
                break;
        }
        return (TRUE);
}