2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
61 * Support from LSI-Logic has also gone a great deal toward making this a
62 * workable subsystem and is gratefully acknowledged.
65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66 * Copyright (c) 2005, WHEEL Sp. z o.o.
67 * Copyright (c) 2004, 2005 Justin T. Gibbs
68 * All rights reserved.
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions are
73 * 1. Redistributions of source code must retain the above copyright
74 * notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76 * substantially similar to the "NO WARRANTY" disclaimer below
77 * ("Disclaimer") and any redistribution must be conditioned upon including
78 * a substantially similar Disclaimer requirement for further binary
80 * 3. Neither the names of the above listed copyright holders nor the names
81 * of any contributors may be used to endorse or promote products derived
82 * from this software without specific prior written permission.
84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
109 #include <sys/callout.h>
110 #include <sys/kthread.h>
111 #include <sys/sysctl.h>
113 #if __FreeBSD_version >= 700025
114 #ifndef CAM_NEW_TRAN_CODE
115 #define CAM_NEW_TRAN_CODE 1
119 static void mpt_poll(struct cam_sim *);
120 static timeout_t mpt_timeout;
121 static void mpt_action(struct cam_sim *, union ccb *);
123 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
124 static void mpt_setwidth(struct mpt_softc *, int, int);
125 static void mpt_setsync(struct mpt_softc *, int, int, int);
126 static int mpt_update_spi_config(struct mpt_softc *, int);
128 static mpt_reply_handler_t mpt_scsi_reply_handler;
129 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
130 static mpt_reply_handler_t mpt_fc_els_reply_handler;
131 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
132 MSG_DEFAULT_REPLY *);
133 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
134 static int mpt_fc_reset_link(struct mpt_softc *, int);
136 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
137 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
138 static void mpt_recovery_thread(void *arg);
139 static void mpt_recover_commands(struct mpt_softc *mpt);
141 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
142 u_int, u_int, u_int, int);
144 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
145 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
146 static int mpt_add_els_buffers(struct mpt_softc *mpt);
147 static int mpt_add_target_commands(struct mpt_softc *mpt);
148 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
149 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
150 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
151 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
152 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
153 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
154 uint8_t, uint8_t const *);
156 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
157 tgt_resource_t *, int);
158 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
159 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
160 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
161 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
163 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
164 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
165 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
166 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
168 static mpt_probe_handler_t mpt_cam_probe;
169 static mpt_attach_handler_t mpt_cam_attach;
170 static mpt_enable_handler_t mpt_cam_enable;
171 static mpt_ready_handler_t mpt_cam_ready;
172 static mpt_event_handler_t mpt_cam_event;
173 static mpt_reset_handler_t mpt_cam_ioc_reset;
174 static mpt_detach_handler_t mpt_cam_detach;
176 static struct mpt_personality mpt_cam_personality =
179 .probe = mpt_cam_probe,
180 .attach = mpt_cam_attach,
181 .enable = mpt_cam_enable,
182 .ready = mpt_cam_ready,
183 .event = mpt_cam_event,
184 .reset = mpt_cam_ioc_reset,
185 .detach = mpt_cam_detach,
188 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
189 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
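/*
 * hw.mpt.enable_sata_wc tunable: -1 (the default) leaves each drive's write
 * cache setting alone, 0 disables it and 1 enables it for SATA devices seen
 * behind SAS controllers (applied from mpt_set_initial_config_sas() below).
 */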
191 int mpt_enable_sata_wc = -1;
192 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
195 mpt_cam_probe(struct mpt_softc *mpt)
200 * Only attach to nodes that support the initiator or target role
201 * (or want to) or have RAID physical devices that need CAM pass-thru
204 if (mpt->do_cfg_role) {
205 role = mpt->cfg_role;
209 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
210 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
217 mpt_cam_attach(struct mpt_softc *mpt)
219 struct cam_devq *devq;
220 mpt_handler_t handler;
225 TAILQ_INIT(&mpt->request_timeout_list);
226 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
227 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
229 handler.reply_handler = mpt_scsi_reply_handler;
230 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
231 &scsi_io_handler_id);
237 handler.reply_handler = mpt_scsi_tmf_reply_handler;
238 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
239 &scsi_tmf_handler_id);
246 * If we're fibre channel and could support target mode, we register
247 * an ELS reply handler and give it resources.
249 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
250 handler.reply_handler = mpt_fc_els_reply_handler;
251 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
257 if (mpt_add_els_buffers(mpt) == FALSE) {
262 maxq -= mpt->els_cmds_allocated;
266 * If we support target mode, we register a reply handler for it,
267 * but don't add command resources until we actually enable target
270 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
271 handler.reply_handler = mpt_scsi_tgt_reply_handler;
272 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
273 &mpt->scsi_tgt_handler_id);
281 handler.reply_handler = mpt_sata_pass_reply_handler;
282 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
283 &sata_pass_handler_id);
291 * We keep one request reserved for timeout TMF requests.
293 mpt->tmf_req = mpt_get_request(mpt, FALSE);
294 if (mpt->tmf_req == NULL) {
295 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
302 * Mark the request as free even though not on the free list.
303 * There is only one TMF request allowed to be outstanding at
304 * a time and the TMF routines perform their own allocation
305 * tracking using the standard state flags.
307 mpt->tmf_req->state = REQ_STATE_FREE;
311 * The rest of this is CAM foo, for which we need to drop our lock
315 if (mpt_spawn_recovery_thread(mpt) != 0) {
316 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
322 * Create the device queue for our SIM(s).
324 devq = cam_simq_alloc(maxq);
326 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
332 * Construct our SIM entry.
335 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
336 if (mpt->sim == NULL) {
337 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
344 * Register exactly this bus.
347 if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
348 mpt_prt(mpt, "Bus registration Failed!\n");
354 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
355 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
356 mpt_prt(mpt, "Unable to allocate Path!\n");
364 * Only register a second bus for RAID physical
365 * devices if the controller supports RAID.
367 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
372 * Create a "bus" to export all hidden disks to CAM.
375 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
376 if (mpt->phydisk_sim == NULL) {
377 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
386 if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
388 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
394 if (xpt_create_path(&mpt->phydisk_path, NULL,
395 cam_sim_path(mpt->phydisk_sim),
396 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
397 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
403 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
412 * Read FC configuration information
415 mpt_read_config_info_fc(struct mpt_softc *mpt)
417 struct sysctl_ctx_list *ctx;
418 struct sysctl_oid *tree;
419 char *topology = NULL;
422 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
423 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
427 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
428 mpt->mpt_fcport_page0.Header.PageVersion,
429 mpt->mpt_fcport_page0.Header.PageLength,
430 mpt->mpt_fcport_page0.Header.PageNumber,
431 mpt->mpt_fcport_page0.Header.PageType);
434 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
435 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
437 mpt_prt(mpt, "failed to read FC Port Page 0\n");
440 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
442 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
444 switch (mpt->mpt_fcport_page0.Flags &
445 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
447 mpt->mpt_fcport_speed = 0;
448 topology = "<NO LOOP>";
450 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
453 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
454 topology = "NL-Port";
456 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
459 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
460 topology = "FL-Port";
463 mpt->mpt_fcport_speed = 0;
468 mpt_lprt(mpt, MPT_PRT_INFO,
469 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
470 "Speed %u-Gbit\n", topology,
471 mpt->mpt_fcport_page0.WWNN.High,
472 mpt->mpt_fcport_page0.WWNN.Low,
473 mpt->mpt_fcport_page0.WWPN.High,
474 mpt->mpt_fcport_page0.WWPN.Low,
475 mpt->mpt_fcport_speed);
477 ctx = device_get_sysctl_ctx(mpt->dev);
478 tree = device_get_sysctl_tree(mpt->dev);
480 snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn),
481 "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High,
482 mpt->mpt_fcport_page0.WWNN.Low);
484 snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn),
485 "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High,
486 mpt->mpt_fcport_page0.WWPN.Low);
488 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
489 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
490 "World Wide Node Name");
492 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
493 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
494 "World Wide Port Name");
501 * Set FC configuration information.
504 mpt_set_initial_config_fc(struct mpt_softc *mpt)
506 CONFIG_PAGE_FC_PORT_1 fc;
511 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
512 &fc.Header, FALSE, 5000);
514 mpt_prt(mpt, "failed to read FC page 1 header\n");
515 return (mpt_fc_reset_link(mpt, 1));
518 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
519 &fc.Header, sizeof (fc), FALSE, 5000);
521 mpt_prt(mpt, "failed to read FC page 1\n");
522 return (mpt_fc_reset_link(mpt, 1));
524 mpt2host_config_page_fc_port_1(&fc);
527 * Check our flags to make sure we support the role we want.
533 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
534 role |= MPT_ROLE_INITIATOR;
536 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
537 role |= MPT_ROLE_TARGET;
540 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
542 if (mpt->do_cfg_role == 0) {
543 role = mpt->cfg_role;
545 mpt->do_cfg_role = 0;
548 if (role != mpt->cfg_role) {
549 if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
550 if ((role & MPT_ROLE_INITIATOR) == 0) {
551 mpt_prt(mpt, "adding initiator role\n");
552 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
555 mpt_prt(mpt, "keeping initiator role\n");
557 } else if (role & MPT_ROLE_INITIATOR) {
558 mpt_prt(mpt, "removing initiator role\n");
561 if (mpt->cfg_role & MPT_ROLE_TARGET) {
562 if ((role & MPT_ROLE_TARGET) == 0) {
563 mpt_prt(mpt, "adding target role\n");
564 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
567 mpt_prt(mpt, "keeping target role\n");
569 } else if (role & MPT_ROLE_TARGET) {
570 mpt_prt(mpt, "removing target role\n");
573 mpt->role = mpt->cfg_role;
576 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
577 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
578 mpt_prt(mpt, "adding OXID option\n");
579 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
586 host2mpt_config_page_fc_port_1(&fc);
587 r = mpt_write_cfg_page(mpt,
588 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
589 sizeof(fc), FALSE, 5000);
591 mpt_prt(mpt, "failed to update NVRAM with changes\n");
594 mpt_prt(mpt, "NOTE: NVRAM changes will not take "
595 "effect until next reboot or IOC reset\n");
601 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
603 ConfigExtendedPageHeader_t hdr;
604 struct mptsas_phyinfo *phyinfo;
605 SasIOUnitPage0_t *buffer;
608 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
609 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
613 if (hdr.ExtPageLength == 0) {
618 len = hdr.ExtPageLength * 4;
619 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
620 if (buffer == NULL) {
625 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
626 0, &hdr, buffer, len, 0, 10000);
628 free(buffer, M_DEVBUF);
632 portinfo->num_phys = buffer->NumPhys;
633 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
634 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
635 if (portinfo->phy_info == NULL) {
636 free(buffer, M_DEVBUF);
641 for (i = 0; i < portinfo->num_phys; i++) {
642 phyinfo = &portinfo->phy_info[i];
643 phyinfo->phy_num = i;
644 phyinfo->port_id = buffer->PhyData[i].Port;
645 phyinfo->negotiated_link_rate =
646 buffer->PhyData[i].NegotiatedLinkRate;
648 le16toh(buffer->PhyData[i].ControllerDevHandle);
651 free(buffer, M_DEVBUF);
657 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
658 uint32_t form, uint32_t form_specific)
660 ConfigExtendedPageHeader_t hdr;
661 SasPhyPage0_t *buffer;
664 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
665 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
669 if (hdr.ExtPageLength == 0) {
674 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
675 if (buffer == NULL) {
680 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
681 form + form_specific, &hdr, buffer,
682 sizeof(SasPhyPage0_t), 0, 10000);
684 free(buffer, M_DEVBUF);
688 phy_info->hw_link_rate = buffer->HwLinkRate;
689 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
690 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
691 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
693 free(buffer, M_DEVBUF);
699 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
700 uint32_t form, uint32_t form_specific)
702 ConfigExtendedPageHeader_t hdr;
703 SasDevicePage0_t *buffer;
704 uint64_t sas_address;
707 bzero(device_info, sizeof(*device_info));
708 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
709 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
713 if (hdr.ExtPageLength == 0) {
718 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
719 if (buffer == NULL) {
724 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
725 form + form_specific, &hdr, buffer,
726 sizeof(SasDevicePage0_t), 0, 10000);
728 free(buffer, M_DEVBUF);
732 device_info->dev_handle = le16toh(buffer->DevHandle);
733 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
734 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
735 device_info->slot = le16toh(buffer->Slot);
736 device_info->phy_num = buffer->PhyNum;
737 device_info->physical_port = buffer->PhysicalPort;
738 device_info->target_id = buffer->TargetID;
739 device_info->bus = buffer->Bus;
740 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
741 device_info->sas_address = le64toh(sas_address);
742 device_info->device_info = le32toh(buffer->DeviceInfo);
744 free(buffer, M_DEVBUF);
750 * Read SAS configuration information. Nothing to do yet.
753 mpt_read_config_info_sas(struct mpt_softc *mpt)
755 struct mptsas_portinfo *portinfo;
756 struct mptsas_phyinfo *phyinfo;
759 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
760 if (portinfo == NULL)
763 error = mptsas_sas_io_unit_pg0(mpt, portinfo);
765 free(portinfo, M_DEVBUF);
769 for (i = 0; i < portinfo->num_phys; i++) {
770 phyinfo = &portinfo->phy_info[i];
771 error = mptsas_sas_phy_pg0(mpt, phyinfo,
772 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
773 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
776 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
777 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
778 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
782 phyinfo->identify.phy_num = phyinfo->phy_num = i;
783 if (phyinfo->attached.dev_handle)
784 error = mptsas_sas_device_pg0(mpt,
786 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
787 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
788 phyinfo->attached.dev_handle);
792 mpt->sas_portinfo = portinfo;
797 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
800 SataPassthroughRequest_t *pass;
804 req = mpt_get_request(mpt, 0);
808 pass = req->req_vbuf;
809 bzero(pass, sizeof(SataPassthroughRequest_t));
810 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
811 pass->TargetID = devinfo->target_id;
812 pass->Bus = devinfo->bus;
813 pass->PassthroughFlags = 0;
814 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
815 pass->DataLength = 0;
816 pass->MsgContext = htole32(req->index | sata_pass_handler_id);
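/*
 * The CommandFIS below is a Host-to-Device Register FIS (type 0x27) carrying
 * the ATA SET FEATURES command (0xEF): the Features byte of 0x02 enables the
 * drive's write cache, 0x82 disables it.  Byte 1 (0x80) sets the C bit so the
 * device acts on the updated Command register.
 */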
817 pass->CommandFIS[0] = 0x27;
818 pass->CommandFIS[1] = 0x80;
819 pass->CommandFIS[2] = 0xef;
820 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
821 pass->CommandFIS[7] = 0x40;
822 pass->CommandFIS[15] = 0x08;
824 mpt_check_doorbell(mpt);
825 mpt_send_cmd(mpt, req);
826 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
829 mpt_free_request(mpt, req);
830 printf("error %d sending passthrough\n", error);
834 status = le16toh(req->IOCStatus);
835 if (status != MPI_IOCSTATUS_SUCCESS) {
836 mpt_free_request(mpt, req);
837 printf("IOCSTATUS %d\n", status);
841 mpt_free_request(mpt, req);
845 * Set SAS configuration information. Nothing to do yet.
848 mpt_set_initial_config_sas(struct mpt_softc *mpt)
850 struct mptsas_phyinfo *phyinfo;
853 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
854 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
855 phyinfo = &mpt->sas_portinfo->phy_info[i];
856 if (phyinfo->attached.dev_handle == 0)
858 if ((phyinfo->attached.device_info &
859 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
862 device_printf(mpt->dev,
863 "%sabling SATA WC on phy %d\n",
864 (mpt_enable_sata_wc) ? "En" : "Dis", i);
865 mptsas_set_sata_wc(mpt, &phyinfo->attached,
874 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
875 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
879 if (reply_frame != NULL) {
880 req->IOCStatus = le16toh(reply_frame->IOCStatus);
882 req->state &= ~REQ_STATE_QUEUED;
883 req->state |= REQ_STATE_DONE;
884 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
885 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
887 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
889 * Whew- we can free this request (late completion)
891 mpt_free_request(mpt, req);
899 * Read SCSI configuration information
902 mpt_read_config_info_spi(struct mpt_softc *mpt)
906 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
907 &mpt->mpt_port_page0.Header, FALSE, 5000);
911 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
912 mpt->mpt_port_page0.Header.PageVersion,
913 mpt->mpt_port_page0.Header.PageLength,
914 mpt->mpt_port_page0.Header.PageNumber,
915 mpt->mpt_port_page0.Header.PageType);
917 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
918 &mpt->mpt_port_page1.Header, FALSE, 5000);
922 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
923 mpt->mpt_port_page1.Header.PageVersion,
924 mpt->mpt_port_page1.Header.PageLength,
925 mpt->mpt_port_page1.Header.PageNumber,
926 mpt->mpt_port_page1.Header.PageType);
928 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
929 &mpt->mpt_port_page2.Header, FALSE, 5000);
933 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
934 mpt->mpt_port_page2.Header.PageVersion,
935 mpt->mpt_port_page2.Header.PageLength,
936 mpt->mpt_port_page2.Header.PageNumber,
937 mpt->mpt_port_page2.Header.PageType);
939 for (i = 0; i < 16; i++) {
940 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
941 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
945 mpt_lprt(mpt, MPT_PRT_DEBUG,
946 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
947 mpt->mpt_dev_page0[i].Header.PageVersion,
948 mpt->mpt_dev_page0[i].Header.PageLength,
949 mpt->mpt_dev_page0[i].Header.PageNumber,
950 mpt->mpt_dev_page0[i].Header.PageType);
952 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
953 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
957 mpt_lprt(mpt, MPT_PRT_DEBUG,
958 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
959 mpt->mpt_dev_page1[i].Header.PageVersion,
960 mpt->mpt_dev_page1[i].Header.PageLength,
961 mpt->mpt_dev_page1[i].Header.PageNumber,
962 mpt->mpt_dev_page1[i].Header.PageType);
966 * At this point, we don't *have* to fail. As long as we have
967 * valid config header information, we can (barely) lurch
971 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
972 sizeof(mpt->mpt_port_page0), FALSE, 5000);
974 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
976 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
977 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
978 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
979 mpt->mpt_port_page0.Capabilities,
980 mpt->mpt_port_page0.PhysicalInterface);
983 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
984 sizeof(mpt->mpt_port_page1), FALSE, 5000);
986 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
988 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
989 mpt_lprt(mpt, MPT_PRT_DEBUG,
990 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
991 mpt->mpt_port_page1.Configuration,
992 mpt->mpt_port_page1.OnBusTimerValue);
995 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
996 sizeof(mpt->mpt_port_page2), FALSE, 5000);
998 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1000 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1001 "Port Page 2: Flags %x Settings %x\n",
1002 mpt->mpt_port_page2.PortFlags,
1003 mpt->mpt_port_page2.PortSettings);
1004 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1005 for (i = 0; i < 16; i++) {
1006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1008 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1009 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1010 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1014 for (i = 0; i < 16; i++) {
1015 rv = mpt_read_cur_cfg_page(mpt, i,
1016 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1020 "cannot read SPI Target %d Device Page 0\n", i);
1023 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1024 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1025 "target %d page 0: Negotiated Params %x Information %x\n",
1026 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1027 mpt->mpt_dev_page0[i].Information);
1029 rv = mpt_read_cur_cfg_page(mpt, i,
1030 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1034 "cannot read SPI Target %d Device Page 1\n", i);
1037 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1038 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1039 "target %d page 1: Requested Params %x Configuration %x\n",
1040 i, mpt->mpt_dev_page1[i].RequestedParameters,
1041 mpt->mpt_dev_page1[i].Configuration);
1047 * Validate SPI configuration information.
1049 * In particular, validate SPI Port Page 1.
1052 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1054 int error, i, pp1val;
1056 mpt->mpt_disc_enable = 0xff;
1057 mpt->mpt_tag_enable = 0;
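/*
 * SPI Port Page 1's Configuration field should carry our initiator ID in the
 * low byte and, in the upper 16 bits, a bitmask of the IDs this port responds
 * to; pp1val is what that field ought to contain for mpt_ini_id.
 */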
1059 pp1val = ((1 << mpt->mpt_ini_id) <<
1060 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1061 if (mpt->mpt_port_page1.Configuration != pp1val) {
1062 CONFIG_PAGE_SCSI_PORT_1 tmp;
1064 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1065 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1066 tmp = mpt->mpt_port_page1;
1067 tmp.Configuration = pp1val;
1068 host2mpt_config_page_scsi_port_1(&tmp);
1069 error = mpt_write_cur_cfg_page(mpt, 0,
1070 &tmp.Header, sizeof(tmp), FALSE, 5000);
1074 error = mpt_read_cur_cfg_page(mpt, 0,
1075 &tmp.Header, sizeof(tmp), FALSE, 5000);
1079 mpt2host_config_page_scsi_port_1(&tmp);
1080 if (tmp.Configuration != pp1val) {
1082 "failed to reset SPI Port Page 1 Config value\n");
1085 mpt->mpt_port_page1 = tmp;
1089 * The purpose of this exercise is to get
1090 * all targets back to async/narrow.
1092 * We skip this step if the BIOS has already negotiated
1093 * speeds with the targets.
1095 i = mpt->mpt_port_page2.PortSettings &
1096 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1097 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1098 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1099 "honoring BIOS transfer negotiations\n");
1101 for (i = 0; i < 16; i++) {
1102 mpt->mpt_dev_page1[i].RequestedParameters = 0;
1103 mpt->mpt_dev_page1[i].Configuration = 0;
1104 (void) mpt_update_spi_config(mpt, i);
1111 mpt_cam_enable(struct mpt_softc *mpt)
1119 if (mpt_read_config_info_fc(mpt)) {
1122 if (mpt_set_initial_config_fc(mpt)) {
1125 } else if (mpt->is_sas) {
1126 if (mpt_read_config_info_sas(mpt)) {
1129 if (mpt_set_initial_config_sas(mpt)) {
1132 } else if (mpt->is_spi) {
1133 if (mpt_read_config_info_spi(mpt)) {
1136 if (mpt_set_initial_config_spi(mpt)) {
1148 mpt_cam_ready(struct mpt_softc *mpt)
1152 * If we're in target mode, hang out resources now
1153 * so we don't cause the world to hang talking to us.
1155 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1157 * Try to add some target command resources
1160 if (mpt_add_target_commands(mpt) == FALSE) {
1161 mpt_prt(mpt, "failed to add target commands\n");
1169 mpt_cam_detach(struct mpt_softc *mpt)
1171 mpt_handler_t handler;
1175 mpt_terminate_recovery_thread(mpt);
1177 handler.reply_handler = mpt_scsi_reply_handler;
1178 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1179 scsi_io_handler_id);
1180 handler.reply_handler = mpt_scsi_tmf_reply_handler;
1181 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1182 scsi_tmf_handler_id);
1183 handler.reply_handler = mpt_fc_els_reply_handler;
1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1186 handler.reply_handler = mpt_scsi_tgt_reply_handler;
1187 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1188 mpt->scsi_tgt_handler_id);
1189 handler.reply_handler = mpt_sata_pass_reply_handler;
1190 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1191 sata_pass_handler_id);
1193 if (mpt->tmf_req != NULL) {
1194 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1195 mpt_free_request(mpt, mpt->tmf_req);
1196 mpt->tmf_req = NULL;
1198 if (mpt->sas_portinfo != NULL) {
1199 free(mpt->sas_portinfo, M_DEVBUF);
1200 mpt->sas_portinfo = NULL;
1203 if (mpt->sim != NULL) {
1204 xpt_free_path(mpt->path);
1205 xpt_bus_deregister(cam_sim_path(mpt->sim));
1206 cam_sim_free(mpt->sim, TRUE);
1210 if (mpt->phydisk_sim != NULL) {
1211 xpt_free_path(mpt->phydisk_path);
1212 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1213 cam_sim_free(mpt->phydisk_sim, TRUE);
1214 mpt->phydisk_sim = NULL;
1219 /* This routine is used after a system crash to dump core onto the swap device.
1222 mpt_poll(struct cam_sim *sim)
1224 struct mpt_softc *mpt;
1226 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1231 * Watchdog timeout routine for SCSI requests.
1234 mpt_timeout(void *arg)
1237 struct mpt_softc *mpt;
1240 ccb = (union ccb *)arg;
1241 mpt = ccb->ccb_h.ccb_mpt_ptr;
1243 MPT_LOCK_ASSERT(mpt);
1244 req = ccb->ccb_h.ccb_req_ptr;
1245 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1246 req->serno, ccb, req->ccb);
1247 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1248 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1249 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1250 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1251 req->state |= REQ_STATE_TIMEDOUT;
1252 mpt_wakeup_recovery_thread(mpt);
1257 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1260 * Takes a list of physical segments and builds the SGL for the SCSI IO command
1261 * and forwards the command to the IOC after one last check that CAM has not
1262 * aborted the transaction.
1265 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1267 request_t *req, *trq;
1270 struct mpt_softc *mpt;
1271 bus_addr_t chain_list_addr;
1272 int first_lim, seg, this_seg_lim;
1273 uint32_t addr, cur_off, flags, nxt_off, tf;
1275 MSG_REQUEST_HEADER *hdrp;
1280 req = (request_t *)arg;
1283 mpt = ccb->ccb_h.ccb_mpt_ptr;
1284 req = ccb->ccb_h.ccb_req_ptr;
1286 hdrp = req->req_vbuf;
1287 mpt_off = req->req_vbuf;
1289 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1294 switch (hdrp->Function) {
1295 case MPI_FUNCTION_SCSI_IO_REQUEST:
1296 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1298 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1300 case MPI_FUNCTION_TARGET_ASSIST:
1302 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1305 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1312 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1314 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1315 nseg, mpt->max_seg_cnt);
1320 if (error != EFBIG && error != ENOMEM) {
1321 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1323 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1325 mpt_freeze_ccb(ccb);
1326 if (error == EFBIG) {
1327 status = CAM_REQ_TOO_BIG;
1328 } else if (error == ENOMEM) {
1329 if (mpt->outofbeer == 0) {
1331 xpt_freeze_simq(mpt->sim, 1);
1332 mpt_lprt(mpt, MPT_PRT_DEBUG,
1335 status = CAM_REQUEUE_REQ;
1337 status = CAM_REQ_CMP_ERR;
1339 mpt_set_ccb_status(ccb, status);
1341 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1342 request_t *cmd_req =
1343 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1344 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1345 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1346 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1348 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1349 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1351 mpt_free_request(mpt, req);
1356 * No data to transfer?
1357 * Just make a single simple SGL with zero length.
1360 if (mpt->verbose >= MPT_PRT_DEBUG) {
1361 int tidx = ((char *)sglp) - mpt_off;
1362 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1366 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1367 MPI_pSGE_SET_FLAGS(se1,
1368 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1369 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1370 se1->FlagsLength = htole32(se1->FlagsLength);
1375 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1377 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1378 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1381 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1382 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1386 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1387 bus_dmasync_op_t op;
1389 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1390 op = BUS_DMASYNC_PREREAD;
1392 op = BUS_DMASYNC_PREWRITE;
1395 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1396 op = BUS_DMASYNC_PREWRITE;
1398 op = BUS_DMASYNC_PREREAD;
1401 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1405 * Okay, fill in what we can at the end of the command frame.
1406 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1407 * the command frame.
1409 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1410 * SIMPLE64 pointers and start doing CHAIN64 entries after
1414 if (nseg < MPT_NSGL_FIRST(mpt)) {
1418 * Leave room for CHAIN element
1420 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1423 se = (SGE_SIMPLE64 *) sglp;
1424 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1426 memset(se, 0, sizeof (*se));
1427 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1428 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1429 if (sizeof(bus_addr_t) > 4) {
1430 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1431 /* SAS1078 36GB limitation WAR */
1432 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1433 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1435 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1437 se->Address.High = htole32(addr);
1439 if (seg == first_lim - 1) {
1440 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1442 if (seg == nseg - 1) {
1443 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1444 MPI_SGE_FLAGS_END_OF_BUFFER;
1446 MPI_pSGE_SET_FLAGS(se, tf);
1447 se->FlagsLength = htole32(se->FlagsLength);
1455 * Tell the IOC where to find the first chain element.
1457 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1458 nxt_off = MPT_RQSL(mpt);
1462 * Make up the rest of the data segments out of a chain element
1463 * (contained in the current request frame) which points to
1464 * SIMPLE64 elements in the next request frame, possibly ending
1465 * with *another* chain element (if there's more).
1467 while (seg < nseg) {
1469 * Point to the chain descriptor. Note that the chain
1470 * descriptor is at the end of the *previous* list (whether
1473 ce = (SGE_CHAIN64 *) se;
1476 * Before we change our current pointer, make sure we won't
1477 * overflow the request area with this frame. Note that we
1478 * test against 'greater than' here as it's okay in this case
1479 * to have next offset be just outside the request area.
1481 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1482 nxt_off = MPT_REQUEST_AREA;
1487 * Set our SGE element pointer to the beginning of the chain
1488 * list and update our next chain list offset.
1490 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1492 nxt_off += MPT_RQSL(mpt);
1495 * Now initialize the chain descriptor.
1497 memset(ce, 0, sizeof (*ce));
1500 * Get the physical address of the chain list.
1502 chain_list_addr = trq->req_pbuf;
1503 chain_list_addr += cur_off;
1504 if (sizeof (bus_addr_t) > 4) {
1506 htole32(((uint64_t)chain_list_addr) >> 32);
1508 ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1509 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1510 MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1513 * If we have more than a frame's worth of segments left,
1514 * set up the chain list to have the last element be another
1517 if ((nseg - seg) > MPT_NSGL(mpt)) {
1518 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1520 * The length of the chain is the length in bytes of the
1521 * number of segments plus the next chain element.
1523 * The next chain descriptor offset is the length,
1524 * in words, of the number of segments.
1526 ce->Length = (this_seg_lim - seg) *
1527 sizeof (SGE_SIMPLE64);
1528 ce->NextChainOffset = ce->Length >> 2;
1529 ce->Length += sizeof (SGE_CHAIN64);
1531 this_seg_lim = nseg;
1532 ce->Length = (this_seg_lim - seg) *
1533 sizeof (SGE_SIMPLE64);
1535 ce->Length = htole16(ce->Length);
1538 * Fill in the chain list SGE elements with our segment data.
1540 * If we're the last element in this chain list, set the last
1541 * element flag. If we're the completely last element period,
1542 * set the end of list and end of buffer flags.
1544 while (seg < this_seg_lim) {
1546 memset(se, 0, sizeof (*se));
1547 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1548 se->Address.Low = htole32(dm_segs->ds_addr &
1550 if (sizeof (bus_addr_t) > 4) {
1551 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1552 /* SAS1078 36GB limitation WAR */
1554 (((uint64_t)dm_segs->ds_addr +
1555 MPI_SGE_LENGTH(se->FlagsLength)) >>
1558 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1560 se->Address.High = htole32(addr);
1562 if (seg == this_seg_lim - 1) {
1563 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1565 if (seg == nseg - 1) {
1566 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1567 MPI_SGE_FLAGS_END_OF_BUFFER;
1569 MPI_pSGE_SET_FLAGS(se, tf);
1570 se->FlagsLength = htole32(se->FlagsLength);
1578 * If we have more segments to do and we've used up all of
1579 * the space in a request area, go allocate another one
1580 * and chain to that.
1582 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1585 nrq = mpt_get_request(mpt, FALSE);
1593 * Append the new request area on the tail of our list.
1595 if ((trq = req->chain) == NULL) {
1598 while (trq->chain != NULL) {
1604 mpt_off = trq->req_vbuf;
1605 if (mpt->verbose >= MPT_PRT_DEBUG) {
1606 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1614 * Last time we need to check if this CCB needs to be aborted.
1616 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1617 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1618 request_t *cmd_req =
1619 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1620 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1621 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1622 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1625 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1626 ccb->ccb_h.status & CAM_STATUS_MASK);
1628 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1630 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1631 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1633 mpt_free_request(mpt, req);
1637 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1638 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1639 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1642 if (mpt->verbose > MPT_PRT_DEBUG) {
1644 mpt_print_request(req->req_vbuf);
1645 for (trq = req->chain; trq; trq = trq->chain) {
1646 printf(" Additional Chain Area %d\n", nc++);
1647 mpt_dump_sgl(trq->req_vbuf, 0);
1651 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1652 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1653 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1654 #ifdef WE_TRUST_AUTO_GOOD_STATUS
1655 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1656 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1657 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1659 tgt->state = TGT_STATE_MOVING_DATA;
1662 tgt->state = TGT_STATE_MOVING_DATA;
1665 mpt_send_cmd(mpt, req);
1669 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1671 request_t *req, *trq;
1674 struct mpt_softc *mpt;
1676 uint32_t flags, nxt_off;
1678 MSG_REQUEST_HEADER *hdrp;
1683 req = (request_t *)arg;
1686 mpt = ccb->ccb_h.ccb_mpt_ptr;
1687 req = ccb->ccb_h.ccb_req_ptr;
1689 hdrp = req->req_vbuf;
1690 mpt_off = req->req_vbuf;
1692 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1697 switch (hdrp->Function) {
1698 case MPI_FUNCTION_SCSI_IO_REQUEST:
1699 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1700 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1702 case MPI_FUNCTION_TARGET_ASSIST:
1704 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1707 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1714 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1716 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1717 nseg, mpt->max_seg_cnt);
1722 if (error != EFBIG && error != ENOMEM) {
1723 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1725 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1727 mpt_freeze_ccb(ccb);
1728 if (error == EFBIG) {
1729 status = CAM_REQ_TOO_BIG;
1730 } else if (error == ENOMEM) {
1731 if (mpt->outofbeer == 0) {
1733 xpt_freeze_simq(mpt->sim, 1);
1734 mpt_lprt(mpt, MPT_PRT_DEBUG,
1737 status = CAM_REQUEUE_REQ;
1739 status = CAM_REQ_CMP_ERR;
1741 mpt_set_ccb_status(ccb, status);
1743 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1744 request_t *cmd_req =
1745 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1746 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1747 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1748 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1750 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1751 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1753 mpt_free_request(mpt, req);
1758 * No data to transfer?
1759 * Just make a single simple SGL with zero length.
1762 if (mpt->verbose >= MPT_PRT_DEBUG) {
1763 int tidx = ((char *)sglp) - mpt_off;
1764 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1768 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1769 MPI_pSGE_SET_FLAGS(se1,
1770 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1771 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1772 se1->FlagsLength = htole32(se1->FlagsLength);
1777 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1779 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1780 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1783 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1784 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1788 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1789 bus_dmasync_op_t op;
1791 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1792 op = BUS_DMASYNC_PREREAD;
1794 op = BUS_DMASYNC_PREWRITE;
1797 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1798 op = BUS_DMASYNC_PREWRITE;
1800 op = BUS_DMASYNC_PREREAD;
1803 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1807 * Okay, fill in what we can at the end of the command frame.
1808 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1809 * the command frame.
1811 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1812 * SIMPLE32 pointers and start doing CHAIN32 entries after
1816 if (nseg < MPT_NSGL_FIRST(mpt)) {
1820 * Leave room for CHAIN element
1822 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1825 se = (SGE_SIMPLE32 *) sglp;
1826 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1829 memset(se, 0,sizeof (*se));
1830 se->Address = htole32(dm_segs->ds_addr);
1832 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1834 if (seg == first_lim - 1) {
1835 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1837 if (seg == nseg - 1) {
1838 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1839 MPI_SGE_FLAGS_END_OF_BUFFER;
1841 MPI_pSGE_SET_FLAGS(se, tf);
1842 se->FlagsLength = htole32(se->FlagsLength);
1850 * Tell the IOC where to find the first chain element.
1852 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1853 nxt_off = MPT_RQSL(mpt);
1857 * Make up the rest of the data segments out of a chain element
1858 * (contained in the current request frame) which points to
1859 * SIMPLE32 elements in the next request frame, possibly ending
1860 * with *another* chain element (if there's more).
1862 while (seg < nseg) {
1864 uint32_t tf, cur_off;
1865 bus_addr_t chain_list_addr;
1868 * Point to the chain descriptor. Note that the chain
1869 * descriptor is at the end of the *previous* list (whether
1872 ce = (SGE_CHAIN32 *) se;
1875 * Before we change our current pointer, make sure we won't
1876 * overflow the request area with this frame. Note that we
1877 * test against 'greater than' here as it's okay in this case
1878 * to have next offset be just outside the request area.
1880 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1881 nxt_off = MPT_REQUEST_AREA;
1886 * Set our SGE element pointer to the beginning of the chain
1887 * list and update our next chain list offset.
1889 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1891 nxt_off += MPT_RQSL(mpt);
1894 * Now initialize the chain descriptor.
1896 memset(ce, 0, sizeof (*ce));
1899 * Get the physical address of the chain list.
1901 chain_list_addr = trq->req_pbuf;
1902 chain_list_addr += cur_off;
1906 ce->Address = htole32(chain_list_addr);
1907 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1911 * If we have more than a frame's worth of segments left,
1912 * set up the chain list to have the last element be another
1915 if ((nseg - seg) > MPT_NSGL(mpt)) {
1916 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1918 * The length of the chain is the length in bytes of the
1919 * number of segments plus the next chain element.
1921 * The next chain descriptor offset is the length,
1922 * in words, of the number of segments.
1924 ce->Length = (this_seg_lim - seg) *
1925 sizeof (SGE_SIMPLE32);
1926 ce->NextChainOffset = ce->Length >> 2;
1927 ce->Length += sizeof (SGE_CHAIN32);
1929 this_seg_lim = nseg;
1930 ce->Length = (this_seg_lim - seg) *
1931 sizeof (SGE_SIMPLE32);
1933 ce->Length = htole16(ce->Length);
1936 * Fill in the chain list SGE elements with our segment data.
1938 * If we're the last element in this chain list, set the last
1939 * element flag. If we're the completely last element period,
1940 * set the end of list and end of buffer flags.
1942 while (seg < this_seg_lim) {
1943 memset(se, 0, sizeof (*se));
1944 se->Address = htole32(dm_segs->ds_addr);
1946 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1948 if (seg == this_seg_lim - 1) {
1949 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1951 if (seg == nseg - 1) {
1952 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1953 MPI_SGE_FLAGS_END_OF_BUFFER;
1955 MPI_pSGE_SET_FLAGS(se, tf);
1956 se->FlagsLength = htole32(se->FlagsLength);
1964 * If we have more segments to do and we've used up all of
1965 * the space in a request area, go allocate another one
1966 * and chain to that.
1968 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1971 nrq = mpt_get_request(mpt, FALSE);
1979 * Append the new request area on the tail of our list.
1981 if ((trq = req->chain) == NULL) {
1984 while (trq->chain != NULL) {
1990 mpt_off = trq->req_vbuf;
1991 if (mpt->verbose >= MPT_PRT_DEBUG) {
1992 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2000 * Last time we need to check if this CCB needs to be aborted.
2002 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2003 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2004 request_t *cmd_req =
2005 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2006 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2007 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2008 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2011 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2012 ccb->ccb_h.status & CAM_STATUS_MASK);
2014 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2016 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2017 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2019 mpt_free_request(mpt, req);
2023 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2024 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2025 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2028 if (mpt->verbose > MPT_PRT_DEBUG) {
2030 mpt_print_request(req->req_vbuf);
2031 for (trq = req->chain; trq; trq = trq->chain) {
2032 printf(" Additional Chain Area %d\n", nc++);
2033 mpt_dump_sgl(trq->req_vbuf, 0);
2037 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2038 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2039 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2040 #ifdef WE_TRUST_AUTO_GOOD_STATUS
2041 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2042 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2043 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2045 tgt->state = TGT_STATE_MOVING_DATA;
2048 tgt->state = TGT_STATE_MOVING_DATA;
2051 mpt_send_cmd(mpt, req);
2055 mpt_start(struct cam_sim *sim, union ccb *ccb)
2058 struct mpt_softc *mpt;
2059 MSG_SCSI_IO_REQUEST *mpt_req;
2060 struct ccb_scsiio *csio = &ccb->csio;
2061 struct ccb_hdr *ccbh = &ccb->ccb_h;
2062 bus_dmamap_callback_t *cb;
2067 /* Get the pointer for the physical adapter */
2068 mpt = ccb->ccb_h.ccb_mpt_ptr;
2069 raid_passthru = (sim == mpt->phydisk_sim);
2071 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2072 if (mpt->outofbeer == 0) {
2074 xpt_freeze_simq(mpt->sim, 1);
2075 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2077 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2078 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2083 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2086 if (sizeof (bus_addr_t) > 4) {
2087 cb = mpt_execute_req_a64;
2089 cb = mpt_execute_req;
2093 * Link the ccb and the request structure so we can find
2094 * the other knowing either the request or the ccb
2097 ccb->ccb_h.ccb_req_ptr = req;
2099 /* Now we build the command for the IOC */
2100 mpt_req = req->req_vbuf;
2101 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2103 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2104 if (raid_passthru) {
2105 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2106 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2107 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2108 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2112 mpt_req->Bus = 0; /* we never set bus here */
2114 tgt = ccb->ccb_h.target_id;
2115 mpt_req->Bus = 0; /* XXX */
2118 mpt_req->SenseBufferLength =
2119 (csio->sense_len < MPT_SENSE_SIZE) ?
2120 csio->sense_len : MPT_SENSE_SIZE;
2123 * We use the message context to find the request structure when we
2124 * get the command completion interrupt from the IOC.
2126 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2128 /* Which physical device to do the I/O on */
2129 mpt_req->TargetID = tgt;
2131 /* We assume a single level LUN type */
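/*
 * LUNs too large for a single byte are encoded with SCSI flat-space
 * addressing (01b in the top two bits of byte 0 plus the high LUN bits);
 * smaller LUNs simply go in byte 1.
 */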
2132 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2133 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2134 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2136 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2139 /* Set the direction of the transfer */
2140 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2141 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2142 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2143 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2145 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2148 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2149 switch(ccb->csio.tag_action) {
2150 case MSG_HEAD_OF_Q_TAG:
2151 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2154 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2156 case MSG_ORDERED_Q_TAG:
2157 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2159 case MSG_SIMPLE_Q_TAG:
2161 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2165 if (mpt->is_fc || mpt->is_sas) {
2166 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2168 /* XXX No such thing for a target doing packetized. */
2169 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2174 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2175 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2178 mpt_req->Control = htole32(mpt_req->Control);
2180 /* Copy the scsi command block into place */
2181 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2182 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2184 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2187 mpt_req->CDBLength = csio->cdb_len;
2188 mpt_req->DataLength = htole32(csio->dxfer_len);
2189 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2192 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2194 if (mpt->verbose == MPT_PRT_DEBUG) {
2196 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2197 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2198 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2199 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2200 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2201 mpt_prtc(mpt, "(%s %u byte%s ",
2202 (df == MPI_SCSIIO_CONTROL_READ)?
2203 "read" : "write", csio->dxfer_len,
2204 (csio->dxfer_len == 1)? ")" : "s)");
2206 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2207 ccb->ccb_h.target_lun, req, req->serno);
2210 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2212 if (error == EINPROGRESS) {
2214 * So as to maintain ordering, freeze the controller queue
2215 * until our mapping is returned.
2217 xpt_freeze_simq(mpt->sim, 1);
2218 ccbh->status |= CAM_RELEASE_SIMQ;
2223 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2230 error = mpt_scsi_send_tmf(mpt,
2231 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2232 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2233 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2234 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2235 0, /* XXX How do I get the channel ID? */
2236 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2237 lun != CAM_LUN_WILDCARD ? lun : 0,
2242 * mpt_scsi_send_tmf hard resets on failure, so no
2243 * need to do so here.
2246 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2250 /* Wait for bus reset to be processed by the IOC. */
2251 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2252 REQ_STATE_DONE, sleep_ok, 5000);
2254 status = le16toh(mpt->tmf_req->IOCStatus);
2255 response = mpt->tmf_req->ResponseCode;
2256 mpt->tmf_req->state = REQ_STATE_FREE;
2259 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2260 "Resetting controller.\n");
2261 mpt_reset(mpt, TRUE);
2265 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2266 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2267 "Resetting controller.\n", status);
2268 mpt_reset(mpt, TRUE);
2272 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2273 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2274 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2275 "Resetting controller.\n", response);
2276 mpt_reset(mpt, TRUE);
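/*
 * Ask the IOC to reset the FC link by sending a reset primitive;
 * optionally wait for the request to complete.
 */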
2283 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2287 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2289 req = mpt_get_request(mpt, FALSE);
2294 memset(fc, 0, sizeof(*fc));
2295 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2296 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2297 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2298 mpt_send_cmd(mpt, req);
2300 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2301 REQ_STATE_DONE, FALSE, 60 * 1000);
2303 mpt_free_request(mpt, req);
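/*
 * Decode asynchronous event notifications from the IOC and, where
 * appropriate, translate them into CAM async events or rescans.
 */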
2310 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2311 MSG_EVENT_NOTIFY_REPLY *msg)
2313 uint32_t data0, data1;
2315 data0 = le32toh(msg->Data[0]);
2316 data1 = le32toh(msg->Data[1]);
2317 switch(msg->Event & 0xFF) {
2318 case MPI_EVENT_UNIT_ATTENTION:
2319 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2320 (data0 >> 8) & 0xff, data0 & 0xff);
2323 case MPI_EVENT_IOC_BUS_RESET:
2324 /* We generated a bus reset */
2325 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2326 (data0 >> 8) & 0xff);
2327 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2330 case MPI_EVENT_EXT_BUS_RESET:
2331 /* Someone else generated a bus reset */
2332 mpt_prt(mpt, "External Bus Reset Detected\n");
2334 * These replies don't return EventData like the MPI
2337 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2340 case MPI_EVENT_RESCAN:
2341 #if __FreeBSD_version >= 600000
2346 * In general this means a device has been added to the loop.
2348 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2349 if (mpt->ready == 0) {
2352 if (mpt->phydisk_sim) {
2353 pathid = cam_sim_path(mpt->phydisk_sim);
2355 pathid = cam_sim_path(mpt->sim);
2358 * Allocate a CCB, create a wildcard path for this bus,
2359 * and schedule a rescan.
2361 ccb = xpt_alloc_ccb_nowait();
2363 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2367 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2368 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2369 mpt_prt(mpt, "unable to create path for rescan\n");
2377 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2380 case MPI_EVENT_LINK_STATUS_CHANGE:
2381 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2382 (data1 >> 8) & 0xff,
2383 ((data0 & 0xff) == 0)? "Failed" : "Active");
2386 case MPI_EVENT_LOOP_STATE_CHANGE:
2387 switch ((data0 >> 16) & 0xff) {
2390 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2391 "(Loop Initialization)\n",
2392 (data1 >> 8) & 0xff,
2393 (data0 >> 8) & 0xff,
2395 switch ((data0 >> 8) & 0xff) {
2397 if ((data0 & 0xff) == 0xF7) {
2398 mpt_prt(mpt, "Device needs AL_PA\n");
2400 mpt_prt(mpt, "Device %02x doesn't like "
2406 if ((data0 & 0xff) == 0xF7) {
2407 mpt_prt(mpt, "Device had loop failure "
2408 "at its receiver prior to acquiring"
2411 mpt_prt(mpt, "Device %02x detected loop"
2412 " failure at its receiver\n",
2417 mpt_prt(mpt, "Device %02x requests that device "
2418 "%02x reset itself\n",
2420 (data0 >> 8) & 0xFF);
2425 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2426 "LPE(%02x,%02x) (Loop Port Enable)\n",
2427 (data1 >> 8) & 0xff, /* Port */
2428 (data0 >> 8) & 0xff, /* Character 3 */
2429 (data0 ) & 0xff /* Character 4 */);
2432 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2433 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2434 (data1 >> 8) & 0xff, /* Port */
2435 (data0 >> 8) & 0xff, /* Character 3 */
2436 (data0 ) & 0xff /* Character 4 */);
2439 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2440 "FC event (%02x %02x %02x)\n",
2441 (data1 >> 8) & 0xff, /* Port */
2442 (data0 >> 16) & 0xff, /* Event */
2443 (data0 >> 8) & 0xff, /* Character 3 */
2444 (data0 ) & 0xff /* Character 4 */);
2448 case MPI_EVENT_LOGOUT:
2449 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2450 (data1 >> 8) & 0xff, data0);
2452 case MPI_EVENT_QUEUE_FULL:
2454 struct cam_sim *sim;
2455 struct cam_path *tmppath;
2456 struct ccb_relsim crs;
2457 PTR_EVENT_DATA_QUEUE_FULL pqf;
2460 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2461 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2462 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2463 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2464 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2465 pqf->TargetID) != 0) {
2466 sim = mpt->phydisk_sim;
2470 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2471 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2472 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2473 mpt_prt(mpt, "unable to create a path to send "
2477 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2478 crs.ccb_h.func_code = XPT_REL_SIMQ;
2479 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2480 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2481 crs.openings = pqf->CurrentDepth - 1;
2482 xpt_action((union ccb *)&crs);
2483 if (crs.ccb_h.status != CAM_REQ_CMP) {
2484 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2486 xpt_free_path(tmppath);
2490 case MPI_EVENT_IR_RESYNC_UPDATE:
2491 mpt_prt(mpt, "IR resync update %d completed\n",
2492 (data0 >> 16) & 0xff);
2494 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2497 struct cam_sim *sim;
2498 struct cam_path *tmppath;
2499 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2501 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2502 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2503 psdsc->TargetID) != 0)
2504 sim = mpt->phydisk_sim;
2507 switch(psdsc->ReasonCode) {
2508 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2509 ccb = xpt_alloc_ccb_nowait();
2512 "unable to alloc CCB for rescan\n");
2515 if (xpt_create_path(&ccb->ccb_h.path, NULL,
2516 cam_sim_path(sim), psdsc->TargetID,
2517 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2519 "unable to create path for rescan\n");
2525 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2526 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2527 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2530 "unable to create path for async event");
2533 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2534 xpt_free_path(tmppath);
2536 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2537 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2538 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2541 mpt_lprt(mpt, MPT_PRT_WARN,
2542 "SAS device status change: Bus: 0x%02x TargetID: "
2543 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2544 psdsc->TargetID, psdsc->ReasonCode);
2549 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2551 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2553 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2554 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2555 mpt_lprt(mpt, MPT_PRT_WARN,
2556 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2557 pde->Port, pde->DiscoveryStatus);
2560 case MPI_EVENT_EVENT_CHANGE:
2561 case MPI_EVENT_INTEGRATED_RAID:
2563 case MPI_EVENT_LOG_ENTRY_ADDED:
2564 case MPI_EVENT_SAS_DISCOVERY:
2565 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2566 case MPI_EVENT_SAS_SES:
2569 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2577 * Reply path for all SCSI I/O requests, called from our
2578 * interrupt handler by extracting our handler index from
2579 * the MsgContext field of the reply from the IOC.
2581 * This routine is optimized for the common case of a
2582 * completion without error. All exception handling is
2583 * offloaded to non-inlined helper routines to minimize
 * the size of the common code path.
 */
2587 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2588 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2590 MSG_SCSI_IO_REQUEST *scsi_req;
2593 if (req->state == REQ_STATE_FREE) {
2594 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2598 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2601 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2606 mpt_req_untimeout(req, mpt_timeout, ccb);
2607 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2609 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2610 bus_dmasync_op_t op;
2612 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2613 op = BUS_DMASYNC_POSTREAD;
2615 op = BUS_DMASYNC_POSTWRITE;
2616 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2617 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2620 if (reply_frame == NULL) {
2622 * Context only reply, completion without error status.
2624 ccb->csio.resid = 0;
2625 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2626 ccb->csio.scsi_status = SCSI_STATUS_OK;
2628 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2631 if (mpt->outofbeer) {
2632 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2634 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2636 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2637 struct scsi_inquiry_data *iq =
2638 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2639 if (scsi_req->Function ==
2640 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2642 * Fake out the device type so that only the
2643 * pass-thru device will attach.
2645 iq->device &= ~0x1F;
2646 iq->device |= T_NODEVICE;
2649 if (mpt->verbose == MPT_PRT_DEBUG) {
2650 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2653 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2655 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2656 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2658 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2660 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2662 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2663 ("CCB req needed wakeup"));
2665 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2667 mpt_free_request(mpt, req);
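/*
 * Completion handler for task management (TMF) requests: record the
 * IOC status and response code and wake any thread waiting on them.
 */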
2672 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2673 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2675 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2677 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2679 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2681 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2682 /* Record IOC Status and Response Code of TMF for any waiters. */
2683 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2684 req->ResponseCode = tmf_reply->ResponseCode;
2686 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2687 req, req->serno, le16toh(tmf_reply->IOCStatus));
2688 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2689 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2690 req->state |= REQ_STATE_DONE;
2693 mpt->tmf_req->state = REQ_STATE_FREE;
2699 * XXX: Move to definitions file
2717 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2718 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2721 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2722 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2725 * We are going to reuse the ELS request to send this response back.
2728 memset(rsp, 0, sizeof(*rsp));
2730 #ifdef USE_IMMEDIATE_LINK_DATA
2732 * The IMMEDIATE method apparently doesn't work.
2734 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2736 rsp->RspLength = length;
2737 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2738 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2741 * Copy over information from the original reply frame to
2742 * its correct place in the response.
2744 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2747 * And now copy back the temporary area to the original frame.
2749 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2750 rsp = req->req_vbuf;
2752 #ifdef USE_IMMEDIATE_LINK_DATA
2753 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2756 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2757 bus_addr_t paddr = req->req_pbuf;
2758 paddr += MPT_RQSL(mpt);
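/*
 * Build a single 32-bit simple SGE pointing at the response payload,
 * which lives just past the request frame in the same request buffer.
 */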
2761 MPI_SGE_FLAGS_HOST_TO_IOC |
2762 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2763 MPI_SGE_FLAGS_LAST_ELEMENT |
2764 MPI_SGE_FLAGS_END_OF_LIST |
2765 MPI_SGE_FLAGS_END_OF_BUFFER;
2766 fl <<= MPI_SGE_FLAGS_SHIFT;
2768 se->FlagsLength = htole32(fl);
2769 se->Address = htole32((uint32_t) paddr);
2776 mpt_send_cmd(mpt, req);
2780 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2781 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2783 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2784 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2788 U16 status = le16toh(reply_frame->IOCStatus);
2791 int do_refresh = TRUE;
2794 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2795 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2796 req, req->serno, rp->Function));
2797 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2798 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2800 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2803 mpt_lprt(mpt, MPT_PRT_DEBUG,
2804 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2805 req, req->serno, reply_frame, reply_frame->Function);
2807 if (status != MPI_IOCSTATUS_SUCCESS) {
2808 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2809 status, reply_frame->Function);
2810 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2812 * XXX: to get around shutdown issue
2821 * If this is the completion of a link service response, we
2822 * recycle the request to post a fresh link service buffer.
2824 * The request pointer is bogus in this case and we have to fetch
2825 * it based upon the TransactionContext.
2827 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2828 /* Freddie Uncle Charlie Katie */
2829 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2830 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2831 if (mpt->els_cmd_ptrs[ioindex] == req) {
2835 KASSERT(ioindex < mpt->els_cmds_allocated,
2836 ("can't find my mommie!"));
2838 /* remove from active list as we're going to re-post it */
2839 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2840 req->state &= ~REQ_STATE_QUEUED;
2841 req->state |= REQ_STATE_DONE;
2842 mpt_fc_post_els(mpt, req, ioindex);
2846 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2847 /* remove from active list as we're done */
2848 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2849 req->state &= ~REQ_STATE_QUEUED;
2850 req->state |= REQ_STATE_DONE;
2851 if (req->state & REQ_STATE_TIMEDOUT) {
2852 mpt_lprt(mpt, MPT_PRT_DEBUG,
2853 "Sync Primitive Send Completed After Timeout\n");
2854 mpt_free_request(mpt, req);
2855 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2856 mpt_lprt(mpt, MPT_PRT_DEBUG,
2857 "Async Primitive Send Complete\n");
2858 mpt_free_request(mpt, req);
2860 mpt_lprt(mpt, MPT_PRT_DEBUG,
2861 "Sync Primitive Send Complete- Waking Waiter\n");
2867 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2868 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2869 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2870 rp->MsgLength, rp->MsgFlags);
2874 if (rp->MsgLength <= 5) {
2876 * This is just an ack of an original ELS buffer post
2878 mpt_lprt(mpt, MPT_PRT_DEBUG,
2879 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2884 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2885 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2887 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2888 cmd = be32toh(elsbuf[0]) >> 24;
2890 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2891 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2895 ioindex = le32toh(rp->TransactionContext);
2896 req = mpt->els_cmd_ptrs[ioindex];
2898 if (rctl == ELS && type == 1) {
2902 * Send back a PRLI ACC
2904 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2905 le32toh(rp->Wwn.PortNameHigh),
2906 le32toh(rp->Wwn.PortNameLow));
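/*
 * Build a PRLI accept payload; word 4 advertises target and/or
 * initiator FCP function, depending on our configured role.
 */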
2907 elsbuf[0] = htobe32(0x02100014);
2908 elsbuf[1] |= htobe32(0x00000100);
2909 elsbuf[4] = htobe32(0x00000002);
2910 if (mpt->role & MPT_ROLE_TARGET)
2911 elsbuf[4] |= htobe32(0x00000010);
2912 if (mpt->role & MPT_ROLE_INITIATOR)
2913 elsbuf[4] |= htobe32(0x00000020);
2914 /* remove from active list as we're done */
2915 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2916 req->state &= ~REQ_STATE_QUEUED;
2917 req->state |= REQ_STATE_DONE;
2918 mpt_fc_els_send_response(mpt, req, rp, 20);
2922 memset(elsbuf, 0, 5 * (sizeof (U32)));
2923 elsbuf[0] = htobe32(0x02100014);
2924 elsbuf[1] = htobe32(0x08000100);
2925 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2926 le32toh(rp->Wwn.PortNameHigh),
2927 le32toh(rp->Wwn.PortNameLow));
2928 /* remove from active list as we're done */
2929 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2930 req->state &= ~REQ_STATE_QUEUED;
2931 req->state |= REQ_STATE_DONE;
2932 mpt_fc_els_send_response(mpt, req, rp, 20);
2936 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2939 } else if (rctl == ABTS && type == 0) {
2940 uint16_t rx_id = le16toh(rp->Rxid);
2941 uint16_t ox_id = le16toh(rp->Oxid);
2942 request_t *tgt_req = NULL;
2945 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2946 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2947 le32toh(rp->Wwn.PortNameLow));
2948 if (rx_id >= mpt->mpt_max_tgtcmds) {
2949 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2950 } else if (mpt->tgt_cmd_ptrs == NULL) {
2951 mpt_prt(mpt, "No TGT CMD PTRS\n");
2953 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2956 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2961 * Check to make sure we have the correct command:
2962 * the reply descriptor in the target state should
2963 * contain an IoIndex that matches the RX_ID.
 *
2966 * It'd be nice to have the OX_ID to crosscheck with
 * as well.
 */
2969 ct_id = GET_IO_INDEX(tgt->reply_desc);
2971 if (ct_id != rx_id) {
2972 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2973 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2981 "CCB (%p): lun %u flags %x status %x\n",
2982 ccb, ccb->ccb_h.target_lun,
2983 ccb->ccb_h.flags, ccb->ccb_h.status);
2985 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2986 "%x nxfers %x\n", tgt->state,
2987 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2990 if (mpt_abort_target_cmd(mpt, tgt_req)) {
2991 mpt_prt(mpt, "unable to start TargetAbort\n");
2994 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2996 memset(elsbuf, 0, 5 * (sizeof (U32)));
2997 elsbuf[0] = htobe32(0);
2998 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2999 elsbuf[2] = htobe32(0x000ffff);
3001 * Dork with the reply frame so that the response to it
 * will be sent back as a BA_ACC (basic accept).
 */
3004 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3005 /* remove from active list as we're done */
3006 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3007 req->state &= ~REQ_STATE_QUEUED;
3008 req->state |= REQ_STATE_DONE;
3009 mpt_fc_els_send_response(mpt, req, rp, 12);
3012 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3014 if (do_refresh == TRUE) {
3015 /* remove from active list as we're done */
3016 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3017 req->state &= ~REQ_STATE_QUEUED;
3018 req->state |= REQ_STATE_DONE;
3019 mpt_fc_post_els(mpt, req, ioindex);
3025 * Clean up all SCSI Initiator personality state in response
3026 * to a controller reset.
3029 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3033 * The pending list is already run down by
3034 * the generic handler. Perform the same
3035 * operation on the timed out request list.
3037 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3038 MPI_IOCSTATUS_INVALID_STATE);
3041 * XXX: We need to repost ELS and Target Command Buffers?
3045 * Inform the XPT that a bus reset has occurred.
3047 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3051 * Parse additional completion information in the reply
3052 * frame for SCSI I/O requests.
3055 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3056 MSG_DEFAULT_REPLY *reply_frame)
3059 MSG_SCSI_IO_REPLY *scsi_io_reply;
3063 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3064 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3065 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3066 ("MPT SCSI I/O Handler called with incorrect reply type"));
3067 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3068 ("MPT SCSI I/O Handler called with continuation reply"));
3070 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3071 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3072 ioc_status &= MPI_IOCSTATUS_MASK;
3073 sstate = scsi_io_reply->SCSIState;
3077 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3079 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3080 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3081 uint32_t sense_returned;
3083 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3085 sense_returned = le32toh(scsi_io_reply->SenseCount);
3086 if (sense_returned < ccb->csio.sense_len)
3087 ccb->csio.sense_resid = ccb->csio.sense_len -
3090 ccb->csio.sense_resid = 0;
3092 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3093 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3094 min(ccb->csio.sense_len, sense_returned));
3097 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3099 * Tag messages rejected, but non-tagged retry
 * was successful.
3102 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3106 switch(ioc_status) {
3107 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3110 * The Linux driver indicates that a zero
3111 * transfer length with this error code
3112 * means a CRC error.
 *
3114 * No need to swap the bytes for checking
 * against zero.
 */
3117 if (scsi_io_reply->TransferCount == 0) {
3118 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3122 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3123 case MPI_IOCSTATUS_SUCCESS:
3124 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3125 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3127 * Status was never returned for this transaction.
3129 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3130 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3131 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3132 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3133 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3134 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3135 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3137 /* XXX Handle SPI-Packet and FCP-2 response info. */
3138 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3140 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3142 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3143 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3145 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3146 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3148 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3150 * Since selection timeouts and "device really not
3151 * there" are grouped into this error code, report
3152 * selection timeout. Selection timeouts are
3153 * typically retried before giving up on the device
3154 * whereas "device not there" errors are considered
 * unretryable.
 */
3157 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3159 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3160 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3162 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3163 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3165 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3166 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3168 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3169 ccb->ccb_h.status = CAM_UA_TERMIO;
3171 case MPI_IOCSTATUS_INVALID_STATE:
3173 * The IOC has been reset. Emulate a bus reset.
3176 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3177 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3179 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3180 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3182 * Don't clobber any timeout status that has
3183 * already been set for this transaction. We
3184 * want the SCSI layer to be able to differentiate
3185 * between the command we aborted due to timeout
3186 * and any innocent bystanders.
3188 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3190 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3193 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3194 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3196 case MPI_IOCSTATUS_BUSY:
3197 mpt_set_ccb_status(ccb, CAM_BUSY);
3199 case MPI_IOCSTATUS_INVALID_FUNCTION:
3200 case MPI_IOCSTATUS_INVALID_SGL:
3201 case MPI_IOCSTATUS_INTERNAL_ERROR:
3202 case MPI_IOCSTATUS_INVALID_FIELD:
3205 * Some of the above may need to kick
3206 * off a recovery action.
 */
3208 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3212 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3213 mpt_freeze_ccb(ccb);
3220 mpt_action(struct cam_sim *sim, union ccb *ccb)
3222 struct mpt_softc *mpt;
3223 struct ccb_trans_settings *cts;
3228 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3230 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3231 raid_passthru = (sim == mpt->phydisk_sim);
3232 MPT_LOCK_ASSERT(mpt);
3234 tgt = ccb->ccb_h.target_id;
3235 lun = ccb->ccb_h.target_lun;
3236 if (raid_passthru &&
3237 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3238 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3239 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3240 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3241 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3242 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3247 ccb->ccb_h.ccb_mpt_ptr = mpt;
3249 switch (ccb->ccb_h.func_code) {
3250 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3252 * Do a couple of preliminary checks...
3254 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3255 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3256 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3257 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3261 /* Max supported CDB length is 16 bytes */
3262 /* XXX Unless we implement the new 32-byte message type */
3263 if (ccb->csio.cdb_len >
3264 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3265 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3266 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3269 #ifdef MPT_TEST_MULTIPATH
3270 if (mpt->failure_id == ccb->ccb_h.target_id) {
3271 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3272 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3276 ccb->csio.scsi_status = SCSI_STATUS_OK;
3277 mpt_start(sim, ccb);
3281 if (raid_passthru) {
3282 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3283 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3287 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3289 xpt_print(ccb->ccb_h.path, "reset bus\n");
3292 xpt_print(ccb->ccb_h.path, "reset device\n");
3294 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3297 * mpt_bus_reset is always successful in that it
3298 * will fall back to a hard reset should a bus
3299 * reset attempt fail.
3301 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3302 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3307 union ccb *accb = ccb->cab.abort_ccb;
3308 switch (accb->ccb_h.func_code) {
3309 case XPT_ACCEPT_TARGET_IO:
3310 case XPT_IMMEDIATE_NOTIFY:
3311 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3313 case XPT_CONT_TARGET_IO:
3314 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3315 ccb->ccb_h.status = CAM_UA_ABORT;
3318 ccb->ccb_h.status = CAM_UA_ABORT;
3321 ccb->ccb_h.status = CAM_REQ_INVALID;
3327 #ifdef CAM_NEW_TRAN_CODE
3328 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3330 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3332 #define DP_DISC_ENABLE 0x1
3333 #define DP_DISC_DISABL 0x2
3334 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3336 #define DP_TQING_ENABLE 0x4
3337 #define DP_TQING_DISABL 0x8
3338 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3340 #define DP_WIDE 0x10
3341 #define DP_NARROW 0x20
3342 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3344 #define DP_SYNC 0x40
3346 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3348 #ifdef CAM_NEW_TRAN_CODE
3349 struct ccb_trans_settings_scsi *scsi;
3350 struct ccb_trans_settings_spi *spi;
3359 if (mpt->is_fc || mpt->is_sas) {
3360 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3364 #ifdef CAM_NEW_TRAN_CODE
3365 scsi = &cts->proto_specific.scsi;
3366 spi = &cts->xport_specific.spi;
3369 * We can be called just to validate transport and protocol versions
3371 if (scsi->valid == 0 && spi->valid == 0) {
3372 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3378 * Skip attempting settings on RAID volume disks.
3379 * Other devices on the bus get the normal treatment.
3381 if (mpt->phydisk_sim && raid_passthru == 0 &&
3382 mpt_is_raid_volume(mpt, tgt) != 0) {
3383 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3384 "no transfer settings for RAID vols\n");
3385 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3389 i = mpt->mpt_port_page2.PortSettings &
3390 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3391 j = mpt->mpt_port_page2.PortFlags &
3392 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3393 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3394 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3395 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3396 "honoring BIOS transfer negotiations\n");
3397 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3405 #ifndef CAM_NEW_TRAN_CODE
3406 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3407 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3408 DP_DISC_ENABLE : DP_DISC_DISABL;
3411 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3412 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3413 DP_TQING_ENABLE : DP_TQING_DISABL;
3416 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3417 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3420 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3421 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3423 period = cts->sync_period;
3424 offset = cts->sync_offset;
3427 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3428 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3429 DP_DISC_ENABLE : DP_DISC_DISABL;
3432 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3433 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3434 DP_TQING_ENABLE : DP_TQING_DISABL;
3437 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3438 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3439 DP_WIDE : DP_NARROW;
3442 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3444 offset = spi->sync_offset;
3446 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3447 &mpt->mpt_dev_page1[tgt];
3448 offset = ptr->RequestedParameters;
3449 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3450 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3452 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3454 period = spi->sync_period;
3456 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3457 &mpt->mpt_dev_page1[tgt];
3458 period = ptr->RequestedParameters;
3459 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3460 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3463 if (dval & DP_DISC_ENABLE) {
3464 mpt->mpt_disc_enable |= (1 << tgt);
3465 } else if (dval & DP_DISC_DISABL) {
3466 mpt->mpt_disc_enable &= ~(1 << tgt);
3468 if (dval & DP_TQING_ENABLE) {
3469 mpt->mpt_tag_enable |= (1 << tgt);
3470 } else if (dval & DP_TQING_DISABL) {
3471 mpt->mpt_tag_enable &= ~(1 << tgt);
3473 if (dval & DP_WIDTH) {
3474 mpt_setwidth(mpt, tgt, 1);
3476 if (dval & DP_SYNC) {
3477 mpt_setsync(mpt, tgt, period, offset);
3480 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3483 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3484 "set [%d]: 0x%x period 0x%x offset %d\n",
3485 tgt, dval, period, offset);
3486 if (mpt_update_spi_config(mpt, tgt)) {
3487 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3489 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3493 case XPT_GET_TRAN_SETTINGS:
3495 #ifdef CAM_NEW_TRAN_CODE
3496 struct ccb_trans_settings_scsi *scsi;
3498 cts->protocol = PROTO_SCSI;
3500 struct ccb_trans_settings_fc *fc =
3501 &cts->xport_specific.fc;
3502 cts->protocol_version = SCSI_REV_SPC;
3503 cts->transport = XPORT_FC;
3504 cts->transport_version = 0;
3505 fc->valid = CTS_FC_VALID_SPEED;
3506 fc->bitrate = 100000;
3507 } else if (mpt->is_sas) {
3508 struct ccb_trans_settings_sas *sas =
3509 &cts->xport_specific.sas;
3510 cts->protocol_version = SCSI_REV_SPC2;
3511 cts->transport = XPORT_SAS;
3512 cts->transport_version = 0;
3513 sas->valid = CTS_SAS_VALID_SPEED;
3514 sas->bitrate = 300000;
3516 cts->protocol_version = SCSI_REV_2;
3517 cts->transport = XPORT_SPI;
3518 cts->transport_version = 2;
3519 if (mpt_get_spi_settings(mpt, cts) != 0) {
3520 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3524 scsi = &cts->proto_specific.scsi;
3525 scsi->valid = CTS_SCSI_VALID_TQ;
3526 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3530 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3531 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3532 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3533 } else if (mpt->is_sas) {
3534 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3535 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3536 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3537 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3538 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3542 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3545 case XPT_CALC_GEOMETRY:
3547 struct ccb_calc_geometry *ccg;
3550 if (ccg->block_size == 0) {
3551 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3552 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3555 cam_calc_geometry(ccg, /* extended */ 1);
3556 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3559 case XPT_PATH_INQ: /* Path routing inquiry */
3561 struct ccb_pathinq *cpi = &ccb->cpi;
3563 cpi->version_num = 1;
3564 cpi->target_sprt = 0;
3565 cpi->hba_eng_cnt = 0;
3566 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3567 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3569 * FC cards report MAX_DEVICES of 512, but
3570 * the MSG_SCSI_IO_REQUEST target id field
3571 * is only 8 bits. Until we fix the driver
3572 * to support 'channels' for bus overflow,
 * just limit it.
 */
3575 if (cpi->max_target > 255) {
3576 cpi->max_target = 255;
3580 * VMware ESX reports > 16 devices and then dies when we probe.
3582 if (mpt->is_spi && cpi->max_target > 15) {
3583 cpi->max_target = 15;
3588 cpi->max_lun = MPT_MAX_LUNS;
3589 cpi->initiator_id = mpt->mpt_ini_id;
3590 cpi->bus_id = cam_sim_bus(sim);
3593 * The base speed is the speed of the underlying connection.
3595 #ifdef CAM_NEW_TRAN_CODE
3596 cpi->protocol = PROTO_SCSI;
3598 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3599 cpi->base_transfer_speed = 100000;
3600 cpi->hba_inquiry = PI_TAG_ABLE;
3601 cpi->transport = XPORT_FC;
3602 cpi->transport_version = 0;
3603 cpi->protocol_version = SCSI_REV_SPC;
3604 } else if (mpt->is_sas) {
3605 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3606 cpi->base_transfer_speed = 300000;
3607 cpi->hba_inquiry = PI_TAG_ABLE;
3608 cpi->transport = XPORT_SAS;
3609 cpi->transport_version = 0;
3610 cpi->protocol_version = SCSI_REV_SPC2;
3612 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
3613 cpi->base_transfer_speed = 3300;
3614 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3615 cpi->transport = XPORT_SPI;
3616 cpi->transport_version = 2;
3617 cpi->protocol_version = SCSI_REV_2;
3621 cpi->hba_misc = PIM_NOBUSRESET;
3622 cpi->base_transfer_speed = 100000;
3623 cpi->hba_inquiry = PI_TAG_ABLE;
3624 } else if (mpt->is_sas) {
3625 cpi->hba_misc = PIM_NOBUSRESET;
3626 cpi->base_transfer_speed = 300000;
3627 cpi->hba_inquiry = PI_TAG_ABLE;
3629 cpi->hba_misc = PIM_SEQSCAN;
3630 cpi->base_transfer_speed = 3300;
3631 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3636 * We give our fake RAID passthru bus a width that covers
3637 * MaxPhysDisks targets and restrict it to one LUN.
3639 if (raid_passthru) {
3640 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3641 cpi->initiator_id = cpi->max_target + 1;
3645 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3646 cpi->hba_misc |= PIM_NOINITIATOR;
3648 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3650 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3652 cpi->target_sprt = 0;
3654 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3655 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3656 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3657 cpi->unit_number = cam_sim_unit(sim);
3658 cpi->ccb_h.status = CAM_REQ_CMP;
3661 case XPT_EN_LUN: /* Enable LUN as a target */
3665 if (ccb->cel.enable)
3666 result = mpt_enable_lun(mpt,
3667 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3669 result = mpt_disable_lun(mpt,
3670 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3672 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3674 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3678 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */
3679 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
3680 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3682 tgt_resource_t *trtp;
3683 lun_id_t lun = ccb->ccb_h.target_lun;
3684 ccb->ccb_h.sim_priv.entries[0].field = 0;
3685 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3686 ccb->ccb_h.flags = 0;
3688 if (lun == CAM_LUN_WILDCARD) {
3689 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3690 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3693 trtp = &mpt->trt_wildcard;
3694 } else if (lun >= MPT_MAX_LUNS) {
3695 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3698 trtp = &mpt->trt[lun];
3700 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3701 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3702 "Put FREE ATIO %p lun %d\n", ccb, lun);
3703 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3705 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
3706 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3707 "Put FREE INOT lun %d\n", lun);
3708 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3711 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3713 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3716 case XPT_CONT_TARGET_IO:
3717 mpt_target_start_io(mpt, ccb);
3721 ccb->ccb_h.status = CAM_REQ_INVALID;
3728 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3730 #ifdef CAM_NEW_TRAN_CODE
3731 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3732 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3735 uint32_t dval, pval, oval;
3738 if (IS_CURRENT_SETTINGS(cts) == 0) {
3739 tgt = cts->ccb_h.target_id;
3740 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3741 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3745 tgt = cts->ccb_h.target_id;
3749 * We aren't looking at Port Page 2 BIOS settings here;
3750 * sometimes these have been known to be bogus (XXX).
 *
3752 * For user settings, we pick the max from port page 0.
 *
3754 * For current settings we read the current settings out from
3755 * device page 0 for that target.
 */
3757 if (IS_CURRENT_SETTINGS(cts)) {
3758 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3761 tmp = mpt->mpt_dev_page0[tgt];
3762 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3763 sizeof(tmp), FALSE, 5000);
3765 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3768 mpt2host_config_page_scsi_device_0(&tmp);
3770 mpt_lprt(mpt, MPT_PRT_DEBUG,
3771 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3772 tmp.NegotiatedParameters, tmp.Information);
3773 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3774 DP_WIDE : DP_NARROW;
3775 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3776 DP_DISC_ENABLE : DP_DISC_DISABL;
3777 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3778 DP_TQING_ENABLE : DP_TQING_DISABL;
3779 oval = tmp.NegotiatedParameters;
3780 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3781 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3782 pval = tmp.NegotiatedParameters;
3783 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3784 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3785 mpt->mpt_dev_page0[tgt] = tmp;
3787 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3788 oval = mpt->mpt_port_page0.Capabilities;
3789 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3790 pval = mpt->mpt_port_page0.Capabilities;
3791 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3794 #ifndef CAM_NEW_TRAN_CODE
3795 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3797 cts->sync_period = pval;
3798 cts->sync_offset = oval;
3799 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3800 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3801 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3802 if (dval & DP_WIDE) {
3803 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3805 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3807 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3808 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3809 if (dval & DP_DISC_ENABLE) {
3810 cts->flags |= CCB_TRANS_DISC_ENB;
3812 if (dval & DP_TQING_ENABLE) {
3813 cts->flags |= CCB_TRANS_TAG_ENB;
3821 spi->sync_offset = oval;
3822 spi->sync_period = pval;
3823 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3824 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3825 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3826 if (dval & DP_WIDE) {
3827 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3829 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3831 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3832 scsi->valid = CTS_SCSI_VALID_TQ;
3833 if (dval & DP_TQING_ENABLE) {
3834 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3836 spi->valid |= CTS_SPI_VALID_DISC;
3837 if (dval & DP_DISC_ENABLE) {
3838 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3842 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3843 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3844 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3849 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3851 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3853 ptr = &mpt->mpt_dev_page1[tgt];
3855 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3857 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
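/*
 * Record the requested sync period and offset in the cached SCSI
 * device page 1 for this target; the change takes effect when
 * mpt_update_spi_config() writes the page back to the IOC.
 */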
3862 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3864 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3866 ptr = &mpt->mpt_dev_page1[tgt];
3867 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3868 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3869 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3870 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3871 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3875 ptr->RequestedParameters |=
3876 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3877 ptr->RequestedParameters |=
3878 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3880 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3883 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3884 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3889 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3891 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3894 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3895 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3896 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3897 tmp = mpt->mpt_dev_page1[tgt];
3898 host2mpt_config_page_scsi_device_1(&tmp);
3899 rv = mpt_write_cur_cfg_page(mpt, tgt,
3900 &tmp.Header, sizeof(tmp), FALSE, 5000);
3902 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3908 /****************************** Timeout Recovery ******************************/
3910 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3914 error = mpt_kthread_create(mpt_recovery_thread, mpt,
3915 &mpt->recovery_thread, /*flags*/0,
3916 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3921 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3924 if (mpt->recovery_thread == NULL) {
3927 mpt->shutdwn_recovery = 1;
3930 * Sleep on a slightly different location
3931 * for this interlock just for added safety.
3933 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
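/*
 * Recovery thread body: sleep until timed-out requests appear or a
 * shutdown is requested, then run command recovery.
 */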
3937 mpt_recovery_thread(void *arg)
3939 struct mpt_softc *mpt;
3941 mpt = (struct mpt_softc *)arg;
3944 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3945 if (mpt->shutdwn_recovery == 0) {
3946 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3949 if (mpt->shutdwn_recovery != 0) {
3952 mpt_recover_commands(mpt);
3954 mpt->recovery_thread = NULL;
3955 wakeup(&mpt->recovery_thread);
3957 mpt_kthread_exit(0);
3961 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3962 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3964 MSG_SCSI_TASK_MGMT *tmf_req;
3968 * Wait for any current TMF request to complete.
3969 * We're only allowed to issue one TMF at a time.
3971 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3972 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3974 mpt_reset(mpt, TRUE);
3978 mpt_assign_serno(mpt, mpt->tmf_req);
3979 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3981 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3982 memset(tmf_req, 0, sizeof(*tmf_req));
3983 tmf_req->TargetID = target;
3984 tmf_req->Bus = channel;
3985 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3986 tmf_req->TaskType = type;
3987 tmf_req->MsgFlags = flags;
3988 tmf_req->MsgContext =
3989 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3990 if (lun > MPT_MAX_LUNS) {
3991 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3992 tmf_req->LUN[1] = lun & 0xff;
3994 tmf_req->LUN[1] = lun;
3996 tmf_req->TaskMsgContext = abort_ctx;
3998 mpt_lprt(mpt, MPT_PRT_DEBUG,
3999 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4000 mpt->tmf_req->serno, tmf_req->MsgContext);
4001 if (mpt->verbose > MPT_PRT_DEBUG) {
4002 mpt_print_request(tmf_req);
4005 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4006 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4007 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4008 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4009 if (error != MPT_OK) {
4010 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4011 mpt->tmf_req->state = REQ_STATE_FREE;
4012 mpt_reset(mpt, TRUE);
4018 * When a command times out, it is placed on the request_timeout_list
4019 * and we wake our recovery thread. The MPT-Fusion architecture supports
4020 * only a single TMF operation at a time, so we serially abort/BDR, etc.,
4021 * the timed-out transactions. The next TMF is issued either by the
4022 * completion handler of the current TMF waking our recovery thread,
4023 * or by the TMF timeout handler forcing a hard reset sequence.
 */
4026 mpt_recover_commands(struct mpt_softc *mpt)
4032 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4034 * No work to do- leave.
4036 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4041 * Flush any commands whose completion coincides with their timeout.
4045 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4047 * The timed-out commands have already
4048 * completed. This typically means
4049 * that either the timeout value was on
4050 * the hairy edge of what the device
4051 * requires or - more likely - interrupts
4052 * are not happening.
4054 mpt_prt(mpt, "Timedout requests already complete. "
4055 "Interrupts may not be functioning.\n");
4056 mpt_enable_ints(mpt);
4061 * We have no visibility into the current state of the
4062 * controller, so attempt to abort the commands in the
4063 * order they timed out. For initiator commands, we
4064 * depend on the reply handler pulling requests off
 * the timeout list as they complete.
 */
4067 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4070 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4072 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4073 req, req->serno, hdrp->Function);
4076 mpt_prt(mpt, "null ccb in timed out request. "
4077 "Resetting Controller.\n");
4078 mpt_reset(mpt, TRUE);
4081 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4084 * Check to see whether this is an initiator command; if
4085 * it is not, deal with it differently.
4087 switch (hdrp->Function) {
4088 case MPI_FUNCTION_SCSI_IO_REQUEST:
4089 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4093 * XXX: FIX ME: need to abort target assists...
4095 mpt_prt(mpt, "just putting it back on the pend q\n");
4096 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4097 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4102 error = mpt_scsi_send_tmf(mpt,
4103 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4104 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4105 htole32(req->index | scsi_io_handler_id), TRUE);
4109 * mpt_scsi_send_tmf hard resets on failure, so no
4110 * need to do so here. Our queue should be emptied
4111 * by the hard reset.
4116 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4117 REQ_STATE_DONE, TRUE, 500);
4119 status = le16toh(mpt->tmf_req->IOCStatus);
4120 response = mpt->tmf_req->ResponseCode;
4121 mpt->tmf_req->state = REQ_STATE_FREE;
4125 * If we've errored out, reset the controller.
4127 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4128 "Resetting controller\n");
4129 mpt_reset(mpt, TRUE);
4133 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4134 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4135 "Resetting controller.\n", status);
4136 mpt_reset(mpt, TRUE);
4140 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4141 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4142 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4143 "Resetting controller.\n", response);
4144 mpt_reset(mpt, TRUE);
4147 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4151 /************************ Target Mode Support ****************************/
4153 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4155 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4156 PTR_SGE_TRANSACTION32 tep;
4157 PTR_SGE_SIMPLE32 se;
4161 paddr = req->req_pbuf;
4162 paddr += MPT_RQSL(mpt);
4165 memset(fc, 0, MPT_REQUEST_AREA);
4166 fc->BufferCount = 1;
4167 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4168 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4171 * Okay, set up ELS buffer pointers. ELS buffer pointers
4172 * consist of a TE SGL element (with details length of zero)
4173 * followed by a SIMPLE SGL element which holds the address
 * of the ELS buffer itself.
 */
4177 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4179 tep->ContextSize = 4;
4181 tep->TransactionContext[0] = htole32(ioindex);
4183 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4185 MPI_SGE_FLAGS_HOST_TO_IOC |
4186 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4187 MPI_SGE_FLAGS_LAST_ELEMENT |
4188 MPI_SGE_FLAGS_END_OF_LIST |
4189 MPI_SGE_FLAGS_END_OF_BUFFER;
4190 fl <<= MPI_SGE_FLAGS_SHIFT;
4191 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4192 se->FlagsLength = htole32(fl);
4193 se->Address = htole32((uint32_t) paddr);
4194 mpt_lprt(mpt, MPT_PRT_DEBUG,
4195 "add ELS index %d ioindex %d for %p:%u\n",
4196 req->index, ioindex, req, req->serno);
4197 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4198 ("mpt_fc_post_els: request not locked"));
4199 mpt_send_cmd(mpt, req);
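/*
 * Post a command buffer to the IOC so that it can deliver a new
 * target mode command to us; the buffer lives just past the request
 * frame in the same request allocation.
 */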
4203 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4205 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4206 PTR_CMD_BUFFER_DESCRIPTOR cb;
4209 paddr = req->req_pbuf;
4210 paddr += MPT_RQSL(mpt);
4211 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4212 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4215 fc->BufferCount = 1;
4216 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4217 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4219 cb = &fc->Buffer[0];
4220 cb->IoIndex = htole16(ioindex);
4221 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4223 mpt_check_doorbell(mpt);
4224 mpt_send_cmd(mpt, req);
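/*
 * Allocate tracking for ELS buffers and post them to the IOC. This is
 * only meaningful for Fibre Channel controllers.
 */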
4228 mpt_add_els_buffers(struct mpt_softc *mpt)
4232 if (mpt->is_fc == 0) {
4236 if (mpt->els_cmds_allocated) {
4240 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4241 M_DEVBUF, M_NOWAIT | M_ZERO);
4243 if (mpt->els_cmd_ptrs == NULL) {
4248 * Feed the chip some ELS buffer resources
4250 for (i = 0; i < MPT_MAX_ELS; i++) {
4251 request_t *req = mpt_get_request(mpt, FALSE);
4255 req->state |= REQ_STATE_LOCKED;
4256 mpt->els_cmd_ptrs[i] = req;
4257 mpt_fc_post_els(mpt, req, i);
4261 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4262 free(mpt->els_cmd_ptrs, M_DEVBUF);
4263 mpt->els_cmd_ptrs = NULL;
4266 if (i != MPT_MAX_ELS) {
4267 mpt_lprt(mpt, MPT_PRT_INFO,
4268 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4270 mpt->els_cmds_allocated = i;
4275 mpt_add_target_commands(struct mpt_softc *mpt)
4279 if (mpt->tgt_cmd_ptrs) {
4283 max = MPT_MAX_REQUESTS(mpt) >> 1;
4284 if (max > mpt->mpt_max_tgtcmds) {
4285 max = mpt->mpt_max_tgtcmds;
4288 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4289 if (mpt->tgt_cmd_ptrs == NULL) {
4291 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4295 for (i = 0; i < max; i++) {
4298 req = mpt_get_request(mpt, FALSE);
4302 req->state |= REQ_STATE_LOCKED;
4303 mpt->tgt_cmd_ptrs[i] = req;
4304 mpt_post_target_command(mpt, req, i);
4309 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4310 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4311 mpt->tgt_cmd_ptrs = NULL;
4315 mpt->tgt_cmds_allocated = i;
4318 mpt_lprt(mpt, MPT_PRT_INFO,
4319 "added %d of %d target bufs\n", i, max);
4325 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4328 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4330 } else if (lun >= MPT_MAX_LUNS) {
4332 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4335 if (mpt->tenabled == 0) {
4337 (void) mpt_fc_reset_link(mpt, 0);
4341 if (lun == CAM_LUN_WILDCARD) {
4342 mpt->trt_wildcard.enabled = 1;
4344 mpt->trt[lun].enabled = 1;
4350 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4354 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4356 } else if (lun >= MPT_MAX_LUNS) {
4358 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4361 if (lun == CAM_LUN_WILDCARD) {
4362 mpt->trt_wildcard.enabled = 0;
4364 mpt->trt[lun].enabled = 0;
4366 for (i = 0; i < MPT_MAX_LUNS; i++) {
4367 if (mpt->trt[i].enabled) {
4371 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4373 (void) mpt_fc_reset_link(mpt, 0);
4381 * Called with MPT lock held
4384 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4386 struct ccb_scsiio *csio = &ccb->csio;
4387 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4388 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4390 switch (tgt->state) {
4391 case TGT_STATE_IN_CAM:
4393 case TGT_STATE_MOVING_DATA:
4394 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4395 xpt_freeze_simq(mpt->sim, 1);
4396 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4397 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4401 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4402 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4403 mpt_tgt_dump_req_state(mpt, cmd_req);
4404 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4409 if (csio->dxfer_len) {
4410 bus_dmamap_callback_t *cb;
4411 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4415 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4416 ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4418 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4419 if (mpt->outofbeer == 0) {
4421 xpt_freeze_simq(mpt->sim, 1);
4422 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4424 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4425 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4429 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4430 if (sizeof (bus_addr_t) > 4) {
4431 cb = mpt_execute_req_a64;
4433 cb = mpt_execute_req;
4437 ccb->ccb_h.ccb_req_ptr = req;
4440 * Record the currently active ccb and the
4441 * request for it in our target state area.
4446 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4450 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4452 ta->QueueTag = ssp->InitiatorTag;
4453 } else if (mpt->is_spi) {
4454 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4456 ta->QueueTag = sp->Tag;
4458 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4459 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4460 ta->ReplyWord = htole32(tgt->reply_desc);
4461 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4463 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4464 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4466 ta->LUN[1] = csio->ccb_h.target_lun;
4469 ta->RelativeOffset = tgt->bytes_xfered;
4470 ta->DataLength = ccb->csio.dxfer_len;
4471 if (ta->DataLength > tgt->resid) {
4472 ta->DataLength = tgt->resid;
4476 * XXX Should be done after data transfer completes?
4478 tgt->resid -= csio->dxfer_len;
4479 tgt->bytes_xfered += csio->dxfer_len;
4481 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4482 ta->TargetAssistFlags |=
4483 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4486 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4487 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4488 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4489 ta->TargetAssistFlags |=
4490 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4493 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4495 mpt_lprt(mpt, MPT_PRT_DEBUG,
4496 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4497 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4498 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4500 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4502 if (error == EINPROGRESS) {
4503 xpt_freeze_simq(mpt->sim, 1);
4504 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			xpt_done(ccb);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}

static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	request_t *req;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
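
	/*
	 * The locally generated response data is staged in the second
	 * half of this request's buffer (MPT_RQSL(mpt) bytes in), and
	 * the single simple SGE built below points the IOC at it.
	 */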
	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));
	se->Address = pptr;

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}

/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	found = 0;
	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
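
	/*
	 * FC and SAS parts can take the abort through the normal request
	 * queue; the SPI parts are handed the request via the handshake
	 * interface instead.
	 */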
	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}

/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
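
/*
 * Note: both of the above are compile-time switches. WE_TRUST_AUTO_GOOD_STATUS
 * gates the TARGET_ASSIST_FLAGS_AUTO_STATUS and FCP_RSP shortcuts used above
 * and below, and WE_CAN_USE_AUTO_REPOST gates setting
 * TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER in mpt_scsi_tgt_status().
 */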
static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
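
		/*
		 * Word usage in the FC_RSP payload as built below:
		 * rsp[2] holds the SCSI status byte plus the residual and
		 * sense-valid flag bits, rsp[3] the residual count, rsp[4]
		 * the sense length, and rsp[8] onward the sense data;
		 * everything is converted to big-endian before the buffer
		 * is handed to the chip.
		 */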
		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}
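
	/*
	 * When a full response is needed, the single simple SGE above
	 * points the IOC at the response payload staged at paddr, the
	 * second half of this request's buffer.
	 */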
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data? "h" : "hout", ccb? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immediate_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	inot->initiator_id = init_id;	/* XXX */
	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->arg = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->arg = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->arg = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->arg = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->arg = MSG_ABORT_TAG;
		break;
	default:
		inot->arg = MSG_NOOP;
		break;
	}
	/*
	 * XXX KDM we need the sequence/tag number for the target of the
	 * task management operation, especially if it is an abort.
	 */
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	xpt_done((union ccb *)inot);
}

static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 ioindex;
	U16 itag;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}

	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;
			buf[2] = 0x5;
			buf[7] = 0x8;
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
			{
				if (cdbp[1] != 0) {
					buf[12] = 0x26;
					buf[13] = 0x01;
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			}
			case REQUEST_SENSE:
			{
				buf[2] = 0x0;
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			}
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x26;
				return;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}

	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
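
	/*
	 * MPT_MAKE_TAGID() folds this request and the I/O index into the
	 * tag; MPT_TAG_2_REQ() (used in mpt_target_start_io()) maps a tag
	 * handed back by CAM to this same command buffer request.
	 */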
	atiop->tag_action = tag_action;
	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;
		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1))? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	xpt_done((union ccb *)atiop);
}

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}

static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);