/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    target_id_t, lun_id_t, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *, u_int);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
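/*
 * Semantics of the tunable above (as used by mpt_set_initial_config_sas()
 * below): leaving hw.mpt.enable_sata_wc at -1 keeps each drive's write
 * cache setting untouched, 0 explicitly disables it and 1 explicitly
 * enables it on every attached SATA device.
 */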
static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
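
	/*
	 * maxq above is our SIM queue depth: the smaller of the IOC's
	 * advertised GlobalCredits and the number of request structures
	 * we allocated; the ELS and TMF reservations below are deducted
	 * from it before the devq is sized.
	 */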

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	switch (mpt->mpt_fcport_page0.CurrentSpeed) {
	case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
		mpt->mpt_fcport_speed = 1;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
		mpt->mpt_fcport_speed = 2;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
		mpt->mpt_fcport_speed = 10;
		break;
	case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
		mpt->mpt_fcport_speed = 4;
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		break;
	}

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
	    | mpt->mpt_fcport_page0.WWNN.Low;
	mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
	    | mpt->mpt_fcport_page0.WWPN.Low;
	mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
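
	/*
	 * The page carries each 64-bit world wide name as two 32-bit
	 * halves, reassembled above; e.g. High 0x10000090 and Low
	 * 0xfa0b2c65 yield the WWNN 0x10000090fa0b2c65.
	 */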

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
	    "Speed %u-Gbit\n", topology,
	    (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
	    mpt->mpt_fcport_speed);

	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
	    "World Wide Node Name");

	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
	    "World Wide Port Name");

	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}
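
	/*
	 * ExtPageLength is counted in 32-bit words, hence the multiply
	 * by four below to get a byte count for malloc().
	 */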
	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
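
	/*
	 * Hand-rolled Register Host-to-Device FIS (type 0x27): byte 1
	 * sets the C bit (command register update), byte 2 is the ATA
	 * SET FEATURES opcode (0xEF) and byte 3 the subcommand, where
	 * 0x02 enables and 0x82 disables the drive's write cache.
	 */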
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (reply_frame != NULL) {
		req->IOCStatus = le16toh(reply_frame->IOCStatus);
	}
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}
	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
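
	/*
	 * Worked example: with an initiator ID of 7 this yields
	 * ((1 << 7) << 16) | 7 == 0x00800007; the upper word is the
	 * response-ID bitmask and the low byte the port's own SCSI ID.
	 */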
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}
	MPT_UNLOCK(mpt);

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}

/* This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt, 0);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}

/*
 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */
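
	/*
	 * Resulting layout for a large transfer, as built below:
	 * the command frame holds MPT_NSGL_FIRST(mpt) - 1 SIMPLE64
	 * elements plus one CHAIN64 that points at the next frame;
	 * each spill frame holds up to MPT_NSGL(mpt) - 1 SIMPLE64
	 * elements plus, if needed, another CHAIN64, until the last
	 * element gets the end-of-list/end-of-buffer flags.
	 */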

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
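			/*
			 * Segments whose end lands in the 4GB window
			 * starting at 36GB (high dword == 9) trip a
			 * known DMA limitation on the SAS1078; they
			 * are rerouted through the chip's local
			 * address space by setting bit 31 and the
			 * LOCAL_ADDRESS SGE flag below.
			 */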
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1U << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);
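
		/*
		 * Example of the arithmetic above: SGE_SIMPLE64 and
		 * SGE_CHAIN64 are 12 bytes each, so if MPT_NSGL(mpt)
		 * were 32, a full spill frame would carry 31 segments:
		 * Length = 31 * 12 + 12 = 384 bytes, NextChainOffset =
		 * (31 * 12) >> 2 = 93 32-bit words.
		 */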

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1U << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}
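
/*
 * 32-bit sibling of mpt_execute_req_a64(): same structure, but built
 * from SGE_SIMPLE32/SGE_CHAIN32 elements with single-word addresses
 * for hosts without 64-bit DMA addressing.
 */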
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int first_lim, seg;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);

		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = htole32(dm_segs->ds_addr);

			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;
	int error;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	    csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
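
	/*
	 * CAM hands us the LUN as a 64-bit integer;
	 * CAM_EXTLUN_BYTE_SWIZZLE() reorders it so that the be64enc()
	 * above lands it in the request's 8-byte LUN field in SCSI
	 * (big-endian) wire format.
	 */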

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	} else {
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
	}

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc || mpt->is_sas) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		} else {
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
		}
	}

	if (mpt->is_spi) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}
	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	} else {
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
	}

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = htole32(csio->dxfer_len);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
	 */
	if (mpt->verbose == MPT_PRT_DEBUG) {
		U32 df;
		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
			mpt_prtc(mpt, "(%s %u byte%s ",
			    (df == MPI_SCSIIO_CONTROL_READ)?
			    "read" : "write", csio->dxfer_len,
			    (csio->dxfer_len == 1)? ")" : "s)");
		}
		mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
		    (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
	}

	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
	    req, 0);
	if (error == EINPROGRESS) {
		/*
		 * So as to maintain ordering, freeze the controller queue
		 * until our mapping is returned.
		 */
		xpt_freeze_simq(mpt->sim, 1);
		ccbh->status |= CAM_RELEASE_SIMQ;
	}
}

static int
static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
	int error;
	uint16_t status;
	uint8_t response;

	error = mpt_scsi_send_tmf(mpt,
	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    0,	/* XXX How do I get the channel ID? */
	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
	    lun != CAM_LUN_WILDCARD ? lun : 0,
	    0, sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, 5000);

	status = le16toh(mpt->tmf_req->IOCStatus);
	response = mpt->tmf_req->ResponseCode;
	mpt->tmf_req->state = REQ_STATE_FREE;

	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
		    "Resetting controller.\n");
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}
	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
		    "Resetting controller.\n", status);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}
	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
		    "Resetting controller.\n", response);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}
	return (0);
}
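/*
 * Both the target-reset and bus-reset flavors above funnel through
 * mpt_scsi_send_tmf(), which serializes on the single dedicated
 * mpt->tmf_req slot; the IOC supports only one task management function
 * in flight at a time, so the wait/check/hard-reset sequence above is the
 * whole of the recovery contract for callers.
 */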
static int
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
	int r = 0;
	request_t *req;
	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	fc = req->req_vbuf;
	memset(fc, 0, sizeof(*fc));
	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
	fc->MsgContext = htole32(req->index | fc_els_handler_id);
	mpt_send_cmd(mpt, req);
	if (dowait) {
		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
		    REQ_STATE_DONE, FALSE, 60 * 1000);
		if (r == 0) {
			mpt_free_request(mpt, req);
		}
	}
	return (r);
}
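/*
 * mpt_fc_reset_link() is fire-and-forget unless dowait is set: in the
 * asynchronous case the request is reclaimed by the FC primitive-send
 * branch of mpt_fc_els_reply_handler() below, and it is only freed here
 * when we explicitly waited for REQ_STATE_DONE.
 */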
static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{
	uint32_t data0, data1;

	data0 = le32toh(msg->Data[0]);
	data1 = le32toh(msg->Data[1]);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
		    (data0 >> 8) & 0xff, data0 & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
		    (data0 >> 8) & 0xff);
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		mpt_prt(mpt, "External Bus Reset Detected\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		xpt_async(AC_BUS_RESET, mpt->path, NULL);
		break;

	case MPI_EVENT_RESCAN:
	{
		union ccb *ccb;
		uint32_t pathid;

		/*
		 * In general this means a device has been added to the loop.
		 */
		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
		if (mpt->ready == 0) {
			break;
		}
		if (mpt->phydisk_sim) {
			pathid = cam_sim_path(mpt->phydisk_sim);
		} else {
			pathid = cam_sim_path(mpt->sim);
		}
		/*
		 * Allocate a CCB, create a wildcard path for this bus,
		 * and schedule a rescan.
		 */
		ccb = xpt_alloc_ccb_nowait();
		if (ccb == NULL) {
			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
			break;
		}

		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			mpt_prt(mpt, "unable to create path for rescan\n");
			xpt_free_ccb(ccb);
			break;
		}
		xpt_rescan(ccb);
		break;
	}

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: LinkState: %s\n",
		    (data1 >> 8) & 0xff,
		    ((data0 & 0xff) == 0)? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((data0 >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
			    "(Loop Initialization)\n",
			    (data1 >> 8) & 0xff,
			    (data0 >> 8) & 0xff,
			    (data0     ) & 0xff);
			switch ((data0 >> 8) & 0xff) {
			case 0xF7:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device needs AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x doesn't like "
					    "FC performance\n", data0 & 0xFF);
				}
				break;
			case 0xF8:
				if ((data0 & 0xff) == 0xF7) {
					mpt_prt(mpt, "Device had loop failure "
					    "at its receiver prior to acquiring"
					    " AL_PA\n");
				} else {
					mpt_prt(mpt, "Device %02x detected loop"
					    " failure at its receiver\n",
					    data0 & 0xFF);
				}
				break;
			default:
				mpt_prt(mpt, "Device %02x requests that device "
				    "%02x reset itself\n",
				    data0 & 0xFF,
				    (data0 >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPE(%02x,%02x) (Loop Port Enable)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		case 0x03:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		default:
			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
			    "FC event (%02x %02x %02x)\n",
			    (data1 >> 8) & 0xff, /* Port */
			    (data0 >> 16) & 0xff, /* Event */
			    (data0 >> 8) & 0xff, /* Character 3 */
			    (data0     ) & 0xff  /* Character 4 */);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
		    (data1 >> 8) & 0xff, data0);
		break;
	case MPI_EVENT_QUEUE_FULL:
	{
		struct cam_sim *sim;
		struct cam_path *tmppath;
		struct ccb_relsim crs;
		PTR_EVENT_DATA_QUEUE_FULL pqf;
		lun_id_t lun_id;

		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
		    "Depth %d\n",
		    pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
		    pqf->TargetID) != 0) {
			sim = mpt->phydisk_sim;
		} else {
			sim = mpt->sim;
		}
		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
				mpt_prt(mpt, "unable to create a path to send "
				    "XPT_REL_SIMQ");
				break;
			}
			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
			crs.ccb_h.func_code = XPT_REL_SIMQ;
			crs.ccb_h.flags = CAM_DEV_QFREEZE;
			crs.release_flags = RELSIM_ADJUST_OPENINGS;
			crs.openings = pqf->CurrentDepth - 1;
			xpt_action((union ccb *)&crs);
			if (crs.ccb_h.status != CAM_REQ_CMP) {
				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
			}
			xpt_free_path(tmppath);
		}
		break;
	}
	case MPI_EVENT_IR_RESYNC_UPDATE:
		mpt_prt(mpt, "IR resync update %d completed\n",
		    (data0 >> 16) & 0xff);
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	{
		union ccb *ccb;
		struct cam_sim *sim;
		struct cam_path *tmppath;
		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;

		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
		    psdsc->TargetID) != 0)
			sim = mpt->phydisk_sim;
		else
			sim = mpt->sim;
		switch(psdsc->ReasonCode) {
		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
			ccb = xpt_alloc_ccb_nowait();
			if (ccb == NULL) {
				mpt_prt(mpt,
				    "unable to alloc CCB for rescan\n");
				break;
			}
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(sim), psdsc->TargetID,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
				mpt_prt(mpt,
				    "unable to create path for rescan\n");
				xpt_free_ccb(ccb);
				break;
			}
			xpt_rescan(ccb);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
			    CAM_REQ_CMP) {
				mpt_prt(mpt,
				    "unable to create path for async event");
				break;
			}
			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
			xpt_free_path(tmppath);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
			break;
		default:
			mpt_lprt(mpt, MPT_PRT_WARN,
			    "SAS device status change: Bus: 0x%02x TargetID: "
			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
			    psdsc->TargetID, psdsc->ReasonCode);
			break;
		}
		break;
	}
	case MPI_EVENT_SAS_DISCOVERY_ERROR:
	{
		PTR_EVENT_DATA_DISCOVERY_ERROR pde;

		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
		    pde->Port, pde->DiscoveryStatus);
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
	case MPI_EVENT_INTEGRATED_RAID:
	case MPI_EVENT_LOG_ENTRY_ADDED:
	case MPI_EVENT_SAS_DISCOVERY:
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
	case MPI_EVENT_SAS_SES:
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
		    msg->Event & 0xFF);
		return (0);
	}
	return (1);
}
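/*
 * mpt_cam_event() returns 1 for events it recognizes (even if only to log
 * them) and 0 for anything it does not, presumably so the generic event
 * code can decide whether further handling is wanted.  Note that the
 * rescan and SAS device-added paths deliberately use the nowait CCB
 * allocator, since this runs from the reply/interrupt path.
 */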
2575 * Reply path for all SCSI I/O requests, called from our
2576 * interrupt handler by extracting our handler index from
2577 * the MsgContext field of the reply from the IOC.
2579 * This routine is optimized for the common case of a
2580 * completion without error. All exception handling is
2581 * offloaded to non-inlined helper routines to minimize
2585 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2586 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2588 MSG_SCSI_IO_REQUEST *scsi_req;
2591 if (req->state == REQ_STATE_FREE) {
2592 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2596 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2599 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2604 mpt_req_untimeout(req, mpt_timeout, ccb);
2605 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2607 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2608 bus_dmasync_op_t op;
2610 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2611 op = BUS_DMASYNC_POSTREAD;
2613 op = BUS_DMASYNC_POSTWRITE;
2614 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2615 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2618 if (reply_frame == NULL) {
2620 * Context only reply, completion without error status.
2622 ccb->csio.resid = 0;
2623 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2624 ccb->csio.scsi_status = SCSI_STATUS_OK;
2626 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2629 if (mpt->outofbeer) {
2630 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2632 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2634 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2635 struct scsi_inquiry_data *iq =
2636 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2637 if (scsi_req->Function ==
2638 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2640 * Fake out the device type so that only the
2641 * pass-thru device will attach.
2643 iq->device &= ~0x1F;
2644 iq->device |= T_NODEVICE;
2647 if (mpt->verbose == MPT_PRT_DEBUG) {
2648 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2651 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2653 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2654 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2656 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2658 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2660 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2661 ("CCB req needed wakeup"));
2663 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2665 mpt_free_request(mpt, req);
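/*
 * The fast path above is the "context only" reply: when the IOC has nothing
 * exceptional to report it hands back just the message context, reply_frame
 * is NULL, and the CCB completes as CAM_REQ_CMP without a reply frame ever
 * being touched.  Anything else detours through
 * mpt_scsi_reply_frame_handler() further below.
 */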
static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
	/* Record IOC Status and Response Code of TMF for any waiters. */
	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
	req->ResponseCode = tmf_reply->ResponseCode;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
	    req, req->serno, le16toh(tmf_reply->IOCStatus));
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else {
		mpt->tmf_req->state = REQ_STATE_FREE;
	}
	return (TRUE);
}
/*
 * XXX: Move to definitions file
 */
#define	ELS	0x22
#define	FC4LS	0x32
#define	ABTS	0x81
#define	BA_ACC	0x84

#define	PRLI	0x20
#define	PRLO	0x21
static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
	U32 fl;
	MSG_LINK_SERVICE_RSP_REQUEST tmp;
	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

	/*
	 * We are going to reuse the ELS request to send this response back.
	 */
	rsp = &tmp;
	memset(rsp, 0, sizeof(*rsp));

#ifdef	USE_IMMEDIATE_LINK_DATA
	/*
	 * Apparently the IMMEDIATE stuff doesn't seem to work.
	 */
	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
	rsp->RspLength = length;
	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
	rsp->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Copy over information from the original reply frame to
	 * its correct place in the response.
	 */
	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

	/*
	 * And now copy back the temporary area to the original frame.
	 */
	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
	rsp = req->req_vbuf;

#ifdef	USE_IMMEDIATE_LINK_DATA
	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
{
	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
	bus_addr_t paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	fl =
		MPI_SGE_FLAGS_HOST_TO_IOC	|
		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
		MPI_SGE_FLAGS_LAST_ELEMENT	|
		MPI_SGE_FLAGS_END_OF_LIST	|
		MPI_SGE_FLAGS_END_OF_BUFFER;
	fl <<= MPI_SGE_FLAGS_SHIFT;
	fl |= (length);
	se->FlagsLength = htole32(fl);
	se->Address = htole32((uint32_t) paddr);
}
#endif
	/*
	 * Send it on...
	 */
	mpt_send_cmd(mpt, req);
}
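/*
 * The 24-byte copy above appears to lift the exchange-identifying words of
 * the original buffer-post reply (starting at offset 0x1c) into the
 * corresponding fields of the LINK_SERVICE_RSP request (at offset 0x0c),
 * which is what lets the IOC tie our response to the inbound exchange.
 * The response payload itself lives in the back half of the request area
 * and is described by the single simple SGE built in the #else branch.
 */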
static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
	U8 rctl;
	U8 type;
	U8 cmd;
	U16 status = le16toh(reply_frame->IOCStatus);
	U32 *elsbuf;
	int ioindex;
	int do_refresh = TRUE;

#ifdef	INVARIANTS
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
	    req, req->serno, rp->Function));
	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	} else {
		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	}
#endif
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
	    req, req->serno, reply_frame, reply_frame->Function);

	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
		    status, reply_frame->Function);
		if (status == MPI_IOCSTATUS_INVALID_STATE) {
			/*
			 * XXX: to get around shutdown issue
			 */
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is a link service response, we recycle the
	 * response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}
		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post.
		 */
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
		return (TRUE);
	}

	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
	cmd = be32toh(elsbuf[0]) >> 24;

	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
		return (TRUE);
	}

	ioindex = le32toh(rp->TransactionContext);
	req = mpt->els_cmd_ptrs[ioindex];

	if (rctl == ELS && type == 1) {
		switch (cmd) {
		case PRLI:
			/*
			 * Send back a PRLI ACC
			 */
			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] |= htobe32(0x00000100);
			elsbuf[4] = htobe32(0x00000002);
			if (mpt->role & MPT_ROLE_TARGET)
				elsbuf[4] |= htobe32(0x00000010);
			if (mpt->role & MPT_ROLE_INITIATOR)
				elsbuf[4] |= htobe32(0x00000020);
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		case PRLO:
			memset(elsbuf, 0, 5 * (sizeof (U32)));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] = htobe32(0x08000100);
			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		default:
			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
			break;
		}
	} else if (rctl == ABTS && type == 0) {
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		mpt_tgt_state_t *tgt;
		request_t *tgt_req = NULL;
		union ccb *ccb;
		uint32_t ct_id;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req == NULL) {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
			goto skip;
		}
		tgt = MPT_TGT_STATE(mpt, tgt_req);

		/* Check to make sure we have the correct command. */
		ct_id = GET_IO_INDEX(tgt->reply_desc);
		if (ct_id != rx_id) {
			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
			    "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
			goto skip;
		}
		if (tgt->itag != ox_id) {
			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
			    "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
			goto skip;
		}

		if ((ccb = tgt->ccb) != NULL) {
			mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
			    ccb, (uintmax_t)ccb->ccb_h.target_lun,
			    ccb->ccb_h.flags, ccb->ccb_h.status);
		}
		mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
		    "%x nxfers %x\n", tgt->state, tgt->resid,
		    tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
		if (mpt_abort_target_cmd(mpt, tgt_req))
			mpt_prt(mpt, "unable to start TargetAbort\n");
skip:
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_els_send_response(mpt, req, rp, 12);
		do_refresh = FALSE;
	} else {
		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
	}
	if (do_refresh == TRUE) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
	}
	return (TRUE);
}
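/*
 * ELS buffer lifecycle, as implemented above: buffers are posted with
 * mpt_fc_post_els(), consumed when an inbound ELS (PRLI/PRLO/ABTS)
 * arrives, and either re-posted directly (the do_refresh path) or
 * re-posted when the LINK_SRVC_RSP we generate completes.  Either way the
 * same request_t keeps circulating; it is never returned to the general
 * pool while the port is active.
 */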
/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{

	/*
	 * The pending list is already run down by
	 * the generic handler.  Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
	    MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * XXX: We need to repost ELS and Target Command Buffers?
	 */

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}
/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
	    ("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
	    ("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		uint32_t sense_returned;

		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;

		sense_returned = le32toh(scsi_io_reply->SenseCount);
		if (sense_returned < ccb->csio.sense_len)
			ccb->csio.sense_resid = ccb->csio.sense_len -
			    sense_returned;
		else
			ccb->csio.sense_resid = 0;

		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		    min(ccb->csio.sense_len, sense_returned));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		 */
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		mpt_freeze_ccb(ccb);
	}

	return (TRUE);
}
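/*
 * The switch above is a straight IOCStatus-to-CAM-status translation.
 * The only subtleties are that TASK_TERMINATED/IOC_TERMINATED preserve a
 * previously recorded timeout status, and that any non-CAM_REQ_CMP result
 * goes through mpt_freeze_ccb() so peripheral drivers see errors in order.
 */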
static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpt_softc *mpt;
	struct ccb_trans_settings *cts;
	target_id_t tgt;
	lun_id_t lun;
	int raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);
	MPT_LOCK_ASSERT(mpt);

	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	if (raid_passthru &&
	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
	}
	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
#ifdef	MPT_TEST_MULTIPATH
		if (mpt->failure_id == ccb->ccb_h.target_id) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
			break;
		}
#endif
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		return;

	case XPT_RESET_BUS:
		if (raid_passthru) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
	case XPT_RESET_DEV:
		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "reset bus\n");
			}
		} else {
			xpt_print(ccb->ccb_h.path, "reset device\n");
		}
		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);

		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;

	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;
		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMEDIATE_NOTIFY:
			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		case XPT_SCSI_IO:
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		break;
	}
#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)

#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		uint8_t dval;
		u_int period;
		u_int offset;
		int i, j;

		cts = &ccb->cts;

		if (mpt->is_fc || mpt->is_sas) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		/*
		 * We can be called just to validate transport and proto
		 * versions.
		 */
		if (scsi->valid == 0 && spi->valid == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		/*
		 * Skip attempting settings on RAID volume disks.
		 * Other devices on the bus get the normal treatment.
		 */
		if (mpt->phydisk_sim && raid_passthru == 0 &&
		    mpt_is_raid_volume(mpt, tgt) != 0) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    "no transfer settings for RAID vols\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		i = mpt->mpt_port_page2.PortSettings &
		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
		j = mpt->mpt_port_page2.PortFlags &
		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
			mpt_lprt(mpt, MPT_PRT_ALWAYS,
			    "honoring BIOS transfer negotiations\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		dval = 0;
		period = 0;
		offset = 0;

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
			    DP_WIDE : DP_NARROW;
		}

		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
			dval |= DP_SYNC;
			offset = spi->sync_offset;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			offset = ptr->RequestedParameters;
			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
		}
		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
			dval |= DP_SYNC;
			period = spi->sync_period;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			period = ptr->RequestedParameters;
			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
		}

		if (dval & DP_DISC_ENABLE) {
			mpt->mpt_disc_enable |= (1 << tgt);
		} else if (dval & DP_DISC_DISABL) {
			mpt->mpt_disc_enable &= ~(1 << tgt);
		}
		if (dval & DP_TQING_ENABLE) {
			mpt->mpt_tag_enable |= (1 << tgt);
		} else if (dval & DP_TQING_DISABL) {
			mpt->mpt_tag_enable &= ~(1 << tgt);
		}
		if (dval & DP_WIDTH) {
			mpt_setwidth(mpt, tgt, 1);
		}
		if (dval & DP_SYNC) {
			mpt_setsync(mpt, tgt, period, offset);
		}
		if (dval == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "set [%d]: 0x%x period 0x%x offset %d\n",
		    tgt, dval, period, offset);
		if (mpt_update_spi_config(mpt, tgt)) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		}
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		cts = &ccb->cts;
		cts->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;
			cts->protocol_version = SCSI_REV_SPC;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;
			if (mpt->mpt_fcport_speed != 0) {
				fc->valid = CTS_FC_VALID_SPEED;
				fc->bitrate = 100000 * mpt->mpt_fcport_speed;
			}
		} else if (mpt->is_sas) {
			struct ccb_trans_settings_sas *sas =
			    &cts->xport_specific.sas;
			cts->protocol_version = SCSI_REV_SPC2;
			cts->transport = XPORT_SAS;
			cts->transport_version = 0;
			sas->valid = CTS_SAS_VALID_SPEED;
			sas->bitrate = 300000;
		} else {
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;
			if (mpt_get_spi_settings(mpt, cts) != 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
				break;
			}
		}
		scsi = &cts->proto_specific.scsi;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
		cam_calc_geometry(ccg, /* extended */ 1);
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		break;
	}
	case XPT_GET_SIM_KNOB:
	{
		struct ccb_sim_knob *kp = &ccb->knob;

		if (mpt->is_fc) {
			kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
			kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
			switch (mpt->role) {
			case MPT_ROLE_NONE:
				kp->xport_specific.fc.role = KNOB_ROLE_NONE;
				break;
			case MPT_ROLE_INITIATOR:
				kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
				break;
			case MPT_ROLE_TARGET:
				kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
				break;
			default:
				kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
				break;
			}
			kp->xport_specific.fc.valid =
			    KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}
		xpt_done(ccb);
		return;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits. Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * just limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}

		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}

		cpi->max_lun = MPT_MAX_LUNS;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);

		/*
		 * The base speed is the speed of the underlying connection.
		 */
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
			    PIM_EXTLUNS;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
			cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
			cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
			cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
			cpi->xport_specific.fc.bitrate =
			    100000 * mpt->mpt_fcport_speed;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
			    PIM_EXTLUNS;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
			    PIM_EXTLUNS;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}

		/*
		 * We give our fake RAID passthru bus a width that is MaxVolumes
		 * wide and restrict it to one lun.
		 */
		if (raid_passthru) {
			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->max_lun = 0;
		}

		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
			cpi->hba_misc |= PIM_NOINITIATOR;
		}
		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
			cpi->target_sprt =
			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int result;

		if (ccb->cel.enable)
			result = mpt_enable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		else
			result = mpt_disable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		if (result == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		}
		break;
	}
	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tgt_resource_t *trtp;
		lun_id_t lun = ccb->ccb_h.target_lun;
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;

		if (lun == CAM_LUN_WILDCARD) {
			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
			trtp = &mpt->trt_wildcard;
		} else if (lun >= MPT_MAX_LUNS) {
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		} else {
			trtp = &mpt->trt[lun];
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
			    sim_links.stqe);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE INOT lun %jx\n", (uintmax_t)lun);
			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
			    sim_links.stqe);
		}
		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
		return;
	}
	case XPT_NOTIFY_ACKNOWLEDGE:	/* Task management request done. */
	{
		request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);

		mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
		mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CONT_TARGET_IO:
		mpt_target_start_io(mpt, ccb);
		return;

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
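/*
 * Completion discipline for mpt_action(): cases that finish synchronously
 * fall out of the switch to the common xpt_done() at the bottom, while
 * XPT_SCSI_IO, XPT_CONT_TARGET_IO, XPT_GET_SIM_KNOB, and the ATIO/INOT
 * enqueues return early because the CCB either remains queued or has
 * already been completed.
 */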
static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
	target_id_t tgt;
	uint32_t dval, pval, oval;
	int rv;

	if (IS_CURRENT_SETTINGS(cts) == 0) {
		tgt = cts->ccb_h.target_id;
	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
			return (-1);
		}
	} else {
		tgt = cts->ccb_h.target_id;
	}

	/*
	 * We aren't looking at Port Page 2 BIOS settings here-
	 * sometimes these have been known to be bogus XXX.
	 *
	 * For user settings, we pick the max from port page 0.
	 *
	 * For current settings we read the current settings out from
	 * device page 0 for that target.
	 */
	if (IS_CURRENT_SETTINGS(cts)) {
		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
		dval = 0;

		tmp = mpt->mpt_dev_page0[tgt];
		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
		    sizeof(tmp), FALSE, 5000);
		if (rv) {
			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
			return (rv);
		}
		mpt2host_config_page_scsi_device_0(&tmp);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
		    tmp.NegotiatedParameters, tmp.Information);
		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
		    DP_WIDE : DP_NARROW;
		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
		    DP_DISC_ENABLE : DP_DISC_DISABL;
		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
		    DP_TQING_ENABLE : DP_TQING_DISABL;
		oval = tmp.NegotiatedParameters;
		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
		pval = tmp.NegotiatedParameters;
		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
		mpt->mpt_dev_page0[tgt] = tmp;
	} else {
		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
		oval = mpt->mpt_port_page0.Capabilities;
		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
		pval = mpt->mpt_port_page0.Capabilities;
		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
	}

	spi->valid = 0;
	scsi->valid = 0;
	spi->flags = 0;
	scsi->flags = 0;
	spi->sync_offset = oval;
	spi->sync_period = pval;
	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
	if (dval & DP_WIDE) {
		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		if (dval & DP_TQING_ENABLE) {
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->valid |= CTS_SPI_VALID_DISC;
		if (dval & DP_DISC_ENABLE) {
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
	}

	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
	return (0);
}
static void
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

	ptr = &mpt->mpt_dev_page1[tgt];
	if (onoff) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	} else {
		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
	}
}

static void
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

	ptr = &mpt->mpt_dev_page1[tgt];
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
	if (period == 0) {
		return;
	}
	ptr->RequestedParameters |=
	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
	ptr->RequestedParameters |=
	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
	if (period < 0xa) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
	}
	if (period < 0x9) {
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
	}
}
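/*
 * mpt_setsync() packs the sync period factor and offset into
 * RequestedParameters and then turns on the protocol options implied by
 * the speed: a period factor below 0xa enables DT (dual transition)
 * clocking, and below 0x9 additionally enables QAS and IU, i.e. roughly
 * the U320 feature set.  For example, a period factor of 0x8 with a
 * nonzero offset requests DT+QAS+IU, while 0xc requests plain sync.
 */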
static int
mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
{
	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
	int rv;

	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
	tmp = mpt->mpt_dev_page1[tgt];
	host2mpt_config_page_scsi_device_1(&tmp);
	rv = mpt_write_cur_cfg_page(mpt, tgt,
	    &tmp.Header, sizeof(tmp), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
		return (-1);
	}
	return (0);
}
/****************************** Timeout Recovery ******************************/
static int
mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
	int error;

	error = kproc_create(mpt_recovery_thread, mpt,
	    &mpt->recovery_thread, /*flags*/0,
	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
	return (error);
}

static void
mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{

	if (mpt->recovery_thread == NULL) {
		return;
	}
	mpt->shutdwn_recovery = 1;
	wakeup(mpt);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
}

static void
mpt_recovery_thread(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	MPT_LOCK(mpt);
	for (;;) {
		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
			if (mpt->shutdwn_recovery == 0) {
				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
			}
		}
		if (mpt->shutdwn_recovery != 0) {
			break;
		}
		mpt_recover_commands(mpt);
	}
	mpt->recovery_thread = NULL;
	wakeup(&mpt->recovery_thread);
	MPT_UNLOCK(mpt);
	kproc_exit(0);
}
static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
    u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
    int sleep_ok)
{
	MSG_SCSI_TASK_MGMT *tmf_req;
	int error;

	/*
	 * Wait for any current TMF request to complete.
	 * We're only allowed to issue one TMF at a time.
	 */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
	if (error != 0) {
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	mpt_assign_serno(mpt, mpt->tmf_req);
	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;

	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->TargetID = target;
	tmf_req->Bus = channel;
	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	tmf_req->TaskType = type;
	tmf_req->MsgFlags = flags;
	tmf_req->MsgContext =
	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
	tmf_req->TaskMsgContext = abort_ctx;

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
	    mpt->tmf_req->serno, tmf_req->MsgContext);
	if (mpt->verbose > MPT_PRT_DEBUG) {
		mpt_print_request(tmf_req);
	}

	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
	if (error != MPT_OK) {
		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
		mpt->tmf_req->state = REQ_STATE_FREE;
		mpt_reset(mpt, TRUE);
	}
	return (error);
}
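/*
 * Unlike normal I/O, the task management frame above is pushed through the
 * doorbell handshake (mpt_send_handshake_cmd()) rather than the request
 * post queue, and its completion is reported back through
 * mpt_scsi_tmf_reply_handler(), which records IOCStatus/ResponseCode on
 * the request for whoever is sleeping in mpt_wait_req().
 */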
/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timedout transactions.  The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
 */
static void
mpt_recover_commands(struct mpt_softc *mpt)
{
	request_t *req;
	union ccb *ccb;
	int error;

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * No work to do- leave.
		 */
		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
		return;
	}

	/*
	 * Flush any commands whose completion coincides with their timeout.
	 */
	mpt_intr(mpt);

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		mpt_prt(mpt, "Timedout requests already complete. "
		    "Interrupts may not be functioning.\n");
		mpt_enable_ints(mpt);
		return;
	}

	/*
	 * We have no visibility into the current state of the
	 * controller, so attempt to abort the commands in the
	 * order they timed-out. For initiator commands, we
	 * depend on the reply handler pulling requests off
	 * the timeout list.
	 */
	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
		uint16_t status;
		uint8_t response;
		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;

		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
		    req, req->serno, hdrp->Function);
		ccb = req->ccb;
		if (ccb == NULL) {
			mpt_prt(mpt, "null ccb in timed out request. "
			    "Resetting Controller.\n");
			mpt_reset(mpt, TRUE);
			continue;
		}
		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);

		/*
		 * Check to see if this is not an initiator command and
		 * deal with it differently if it is.
		 */
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			break;
		default:
			/*
			 * XXX: FIX ME: need to abort target assists...
			 */
			mpt_prt(mpt, "just putting it back on the pend q\n");
			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
			    links);
			continue;
		}

		error = mpt_scsi_send_tmf(mpt,
		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    htole32(req->index | scsi_io_handler_id), TRUE);

		if (error != 0) {
			/*
			 * mpt_scsi_send_tmf hard resets on failure, so no
			 * need to do so here.  Our queue should be emptied
			 * by the hard reset.
			 */
			continue;
		}

		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
		    REQ_STATE_DONE, TRUE, 500);

		status = le16toh(mpt->tmf_req->IOCStatus);
		response = mpt->tmf_req->ResponseCode;
		mpt->tmf_req->state = REQ_STATE_FREE;

		if (error != 0) {
			/*
			 * If we've errored out, reset the controller.
			 */
			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
			    "Resetting controller\n");
			mpt_reset(mpt, TRUE);
			continue;
		}

		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
			    "Resetting controller.\n", status);
			mpt_reset(mpt, TRUE);
			continue;
		}

		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
			    "Resetting controller.\n", response);
			mpt_reset(mpt, TRUE);
			continue;
		}
		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
	}
}
/************************ Target Mode Support ****************************/
static void
mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
{
	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
	PTR_SGE_TRANSACTION32 tep;
	PTR_SGE_SIMPLE32 se;
	bus_addr_t paddr;
	uint32_t fl;

	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	fc = req->req_vbuf;
	memset(fc, 0, MPT_REQUEST_AREA);
	fc->BufferCount = 1;
	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
	fc->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Okay, set up ELS buffer pointers. ELS buffer pointers
	 * consist of a TE SGL element (with details length of zero)
	 * followed by a SIMPLE SGL element which holds the address
	 * of the buffer.
	 */
	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;

	tep->ContextSize = 4;
	tep->Flags = 0;
	tep->TransactionContext[0] = htole32(ioindex);

	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
	fl =
		MPI_SGE_FLAGS_HOST_TO_IOC	|
		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
		MPI_SGE_FLAGS_LAST_ELEMENT	|
		MPI_SGE_FLAGS_END_OF_LIST	|
		MPI_SGE_FLAGS_END_OF_BUFFER;
	fl <<= MPI_SGE_FLAGS_SHIFT;
	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
	se->FlagsLength = htole32(fl);
	se->Address = htole32((uint32_t) paddr);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "add ELS index %d ioindex %d for %p:%u\n",
	    req->index, ioindex, req, req->serno);
	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
	    ("mpt_fc_post_els: request not locked"));
	mpt_send_cmd(mpt, req);
}
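/*
 * SGE encoding used above, for reference: FlagsLength is a single 32-bit
 * word with the flag bits shifted up by MPI_SGE_FLAGS_SHIFT and the byte
 * count in the low bits, so the ELS buffer SGE works out to roughly
 *
 *	fl  = (HOST_TO_IOC | SIMPLE | LAST | END_OF_LIST | END_OF_BUFFER)
 *	      << MPI_SGE_FLAGS_SHIFT;
 *	fl |= buffer_length;		(low bits carry the byte count)
 *
 * with the buffer itself carved out of the back half of the request area,
 * MPT_RQSL(mpt) bytes past req_pbuf.
 */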
static void
mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
{
	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
	PTR_CMD_BUFFER_DESCRIPTOR cb;
	bus_addr_t paddr;

	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);
	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;

	fc = req->req_vbuf;
	fc->BufferCount = 1;
	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
	fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

	cb = &fc->Buffer[0];
	cb->IoIndex = htole16(ioindex);
	cb->u.PhysicalAddress32 = htole32((U32) paddr);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
}
static int
mpt_add_els_buffers(struct mpt_softc *mpt)
{
	int i;

	if (mpt->is_fc == 0) {
		return (TRUE);
	}

	if (mpt->els_cmds_allocated) {
		return (TRUE);
	}

	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
	    M_DEVBUF, M_NOWAIT | M_ZERO);

	if (mpt->els_cmd_ptrs == NULL) {
		return (FALSE);
	}

	/*
	 * Feed the chip some ELS buffer resources
	 */
	for (i = 0; i < MPT_MAX_ELS; i++) {
		request_t *req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;
		mpt->els_cmd_ptrs[i] = req;
		mpt_fc_post_els(mpt, req, i);
	}

	if (i == 0) {
		mpt_prt(mpt, "unable to add ELS buffer resources\n");
		free(mpt->els_cmd_ptrs, M_DEVBUF);
		mpt->els_cmd_ptrs = NULL;
		return (FALSE);
	}
	if (i != MPT_MAX_ELS) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
	}
	mpt->els_cmds_allocated = i;
	return (TRUE);
}
static int
mpt_add_target_commands(struct mpt_softc *mpt)
{
	int i, max;

	if (mpt->tgt_cmd_ptrs) {
		return (TRUE);
	}

	max = MPT_MAX_REQUESTS(mpt) >> 1;
	if (max > mpt->mpt_max_tgtcmds) {
		max = mpt->mpt_max_tgtcmds;
	}
	mpt->tgt_cmd_ptrs =
	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->tgt_cmd_ptrs == NULL) {
		mpt_prt(mpt,
		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
		return (FALSE);
	}

	for (i = 0; i < max; i++) {
		request_t *req;

		req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;
		mpt->tgt_cmd_ptrs[i] = req;
		mpt_post_target_command(mpt, req, i);
	}

	if (i == 0) {
		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
		mpt->tgt_cmd_ptrs = NULL;
		return (FALSE);
	}

	mpt->tgt_cmds_allocated = i;

	if (i < max) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "added %d of %d target bufs\n", i, max);
	}
	return (i);
}
static int
mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 1;
	} else if (lun >= MPT_MAX_LUNS) {
		return (EINVAL);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (EINVAL);
	}
	if (mpt->tenabled == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 1;
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 1;
	} else {
		mpt->trt[lun].enabled = 1;
	}
	return (0);
}

static int
mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
{
	int i;

	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
		mpt->twildcard = 0;
	} else if (lun >= MPT_MAX_LUNS) {
		return (EINVAL);
	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
		return (EINVAL);
	}
	if (lun == CAM_LUN_WILDCARD) {
		mpt->trt_wildcard.enabled = 0;
	} else {
		mpt->trt[lun].enabled = 0;
	}
	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
			break;
		}
	}
	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
		if (mpt->is_fc) {
			(void) mpt_fc_reset_link(mpt, 0);
		}
		mpt->tenabled = 0;
	}
	return (0);
}
/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

	switch (tgt->state) {
	case TGT_STATE_IN_CAM:
		break;
	case TGT_STATE_MOVING_DATA:
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_freeze_simq(mpt->sim, 1);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		xpt_done(ccb);
		return;
	default:
		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
		mpt_tgt_dump_req_state(mpt, cmd_req);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		xpt_done(ccb);
		return;
	}

	if (csio->dxfer_len) {
		bus_dmamap_callback_t *cb;
		PTR_MSG_TARGET_ASSIST_REQUEST ta;
		request_t *req;
		int error;

		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));

		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(mpt->sim, 1);
				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
			}
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
			return;
		}
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		if (sizeof (bus_addr_t) > 4) {
			cb = mpt_execute_req_a64;
		} else {
			cb = mpt_execute_req;
		}

		req->ccb = ccb;
		ccb->ccb_h.ccb_req_ptr = req;

		/*
		 * Record the currently active ccb and the
		 * request for it in our target state area.
		 */
		tgt->ccb = ccb;
		tgt->req = req;

		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
		ta = req->req_vbuf;

		if (mpt->is_sas) {
			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
			    cmd_req->req_vbuf;
			ta->QueueTag = ssp->InitiatorTag;
		} else if (mpt->is_spi) {
			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
			    cmd_req->req_vbuf;
			ta->QueueTag = sp->Tag;
		}
		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
		ta->ReplyWord = htole32(tgt->reply_desc);
		be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));

		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		csio->resid = csio->dxfer_len - ta->DataLength;
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
		    cb, req, 0);
		if (error == EINPROGRESS) {
			xpt_freeze_simq(mpt->sim, 1);
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
	} else {
		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			xpt_done(ccb);
			return;
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
		    (void *)&csio->sense_data,
		    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
		     csio->sense_len : 0);
	}
}
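/*
 * mpt_target_start_io() thus splits CTIOs into two shapes: data-bearing
 * CTIOs become TARGET_ASSIST requests carried by a freshly allocated
 * request_t (DMA-mapped via the same execute_req callbacks as initiator
 * I/O), while status-only CTIOs are routed to mpt_scsi_tgt_status() below.
 * Note the resid/bytes_xfered bookkeeping is updated at submit time, which
 * the XXX above suggests might more correctly happen at completion.
 */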

static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    lun_id_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	request_t *req;
	bus_addr_t pptr;
	uint32_t flags;
	uint8_t *dptr;
	SGE_SIMPLE32 *se;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	/*
	 * The local response data lives in the second half of the request
	 * buffer; copy it there and point a single simple SGE at it.
	 */
	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = htole32(pptr);
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}
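
/*
 * Illustrative sketch (example only, not driver code): a 32-bit "simple"
 * SGE packs its flags into the top byte of FlagsLength and the byte count
 * into the low 24 bits; the MPI_pSGE_SET_LENGTH/MPI_pSGE_SET_FLAGS macros
 * above perform this packing. Assuming the standard MPI SGE layout:
 */
#if 0
static uint32_t
example_sge_flagslength(uint8_t flags, uint32_t length)
{
	/* Flags live in bits 31:24 (MPI_SGE_FLAGS_SHIFT is 24). */
	return (((uint32_t)flags << 24) | (length & 0x00ffffffU));
}
#endif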

/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;
	mpt_tgt_state_t *tgt;
	request_t *req;
	uint32_t tag;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
		trtp = &mpt->trt_wildcard;
	else
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
		tag = accb->atio.tag_id;
	} else {
		lp = &trtp->inots;
		tag = accb->cin1.tag_id;
	}

	/* Search the CCB among queued. */
	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch != &accb->ccb_h)
			continue;
		STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}

	/* Search the CCB among running. */
	req = MPT_TAG_2_REQ(mpt, tag);
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->tag_id == tag) {
		mpt_abort_target_cmd(mpt, req);
		return (CAM_REQ_CMP);
	}

	return (CAM_UA_ABORT);
}
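
/*
 * Illustrative sketch (example only, not driver code): the queued-CCB
 * search above is the classic <sys/queue.h> pattern- walk the tail queue
 * and, on a match, unlink the element before completing it. With a
 * hypothetical entry type:
 */
#if 0
struct example_entry {
	int tag;
	STAILQ_ENTRY(example_entry) links;
};
STAILQ_HEAD(example_head, example_entry);

static struct example_entry *
example_find_and_remove(struct example_head *hp, int tag)
{
	struct example_entry *ep;

	STAILQ_FOREACH(ep, hp, links) {
		if (ep->tag != tag)
			continue;
		STAILQ_REMOVE(hp, ep, example_entry, links);
		return (ep);
	}
	return (NULL);
}
#endif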

/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);

	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
	}
	return (error);
}

/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */

static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data, u_int sense_len)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			xpt_done(ccb);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
			mpt_abort_target_cmd(mpt, cmd_req);
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->StatusCode = status;
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32-bit words, which are much easier to deal with
		 * when swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = htobe32(status);
#define	MIN_FCP_RESPONSE_SIZE	24
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = MIN_FCP_RESPONSE_SIZE;
#endif
		if (tgt->resid < 0) {
			rsp[2] |= htobe32(0x400); /* FCP_RESID_OVER */
			rsp[3] = htobe32(-tgt->resid);
			resplen = MIN_FCP_RESPONSE_SIZE;
		} else if (tgt->resid > 0) {
			rsp[2] |= htobe32(0x800); /* FCP_RESID_UNDER */
			rsp[3] = htobe32(tgt->resid);
			resplen = MIN_FCP_RESPONSE_SIZE;
		}
		if (sense_len > 0) {
			rsp[2] |= htobe32(0x200); /* FCP_SNS_LEN_VALID */
			rsp[4] = htobe32(sense_len);
			memcpy(&rsp[6], sense_data, sense_len);
			resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
		}
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl = MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
	    req, req->serno, tgt->resid);
	if (mpt->verbose > MPT_PRT_DEBUG)
		mpt_print_request(req->req_vbuf);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
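
/*
 * For reference (not driver code): the FCP_RSP payload built above, viewed
 * as big-endian 32-bit words. Word 2 carries the FCP_FLAGS byte (payload
 * byte 10) and the SCSI status byte (byte 11), which is why the
 * FCP_RESID_OVER (0x04), FCP_RESID_UNDER (0x08) and FCP_SNS_LEN_VALID
 * (0x02) bits appear above shifted left by 8, as 0x400, 0x800 and 0x200:
 *
 *	rsp[0]-rsp[1]	reserved
 *	rsp[2]		FCP_FLAGS << 8 | SCSI status
 *	rsp[3]		FCP_RESID
 *	rsp[4]		FCP_SNS_LEN
 *	rsp[5]		FCP_RSP_LEN
 *	rsp[6]...	sense data (hence the MIN_FCP_RESPONSE_SIZE of 24)
 */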

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immediate_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %jx\n", inot,
	    (uintmax_t)inot->ccb_h.target_lun);

	inot->initiator_id = init_id;	/* XXX */
	inot->tag_id = tgt->tag_id;

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_QUERY_TASK_SET:
		inot->arg = MSG_QUERY_TASK_SET;
		break;
	case MPT_ABORT_TASK_SET:
		inot->arg = MSG_ABORT_TASK_SET;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->arg = MSG_CLEAR_TASK_SET;
		break;
	case MPT_QUERY_ASYNC_EVENT:
		inot->arg = MSG_QUERY_ASYNC_EVENT;
		break;
	case MPT_LOGICAL_UNIT_RESET:
		inot->arg = MSG_LOGICAL_UNIT_RESET;
		break;
	case MPT_TARGET_RESET:
		inot->arg = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->arg = MSG_CLEAR_ACA;
		break;
	default:
		inot->arg = MSG_NOOP;
		break;
	}
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV;
	xpt_done((union ccb *)inot);
}

static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	uint16_t ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x1:
				fct = MPT_QUERY_TASK_SET;
				break;
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x8:
				fct = MPT_QUERY_ASYNC_EVENT;
				break;
			case 0x10:
				fct = MPT_LOGICAL_UNIT_RESET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, NULL, req,
				    SCSI_STATUS_OK, NULL, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queueing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		tgt->itag = fc->OptionalOxid;
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		tgt->itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		tgt->itag = sp->Tag;
	}

	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t sense[MPT_SENSE_SIZE];
			size_t len;

			memset(sense, 0, sizeof(sense));
			sense[0] = 0xf0;	/* current, fixed format */
			sense[2] = 0x5;		/* ILLEGAL REQUEST */
			sense[7] = 0x8;		/* additional length */

			switch (cdbp[0]) {
			case INQUIRY:
				if (cdbp[1] != 0) {
					/* We don't do VPD pages. */
					sense[12] = 0x24; /* INVALID FIELD */
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			case REQUEST_SENSE:
				sense[2] = 0x0;	/* NO SENSE */
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (sense));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    sense, len);
				return;
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				sense[12] = 0x20; /* INVALID COMMAND */
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %jx\n",
				    cdbp[0], (uintmax_t)lun);
				sense[12] = 0x25; /* LU NOT SUPPORTED */
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req,
			    SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, NULL, req,
			    SCSI_STATUS_OK, NULL, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}

	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
		    mpt->tenabled? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL, 0);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %jx\n", atiop,
	    (uintmax_t)atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->tag_id = tgt->tag_id;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = 16;
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;

		mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
		    (uintmax_t)atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1))? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
	}

	xpt_done((union ccb *)atiop);
}
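
/*
 * Illustrative sketch (example only, not driver code): the 8-byte LUN
 * field arrives in SAM wire order (big-endian); be64dec() above folds it
 * into a host-order 64-bit integer, equivalent to:
 */
#if 0
static uint64_t
example_be64dec(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return (v);
}
#endif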

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
	    tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
	    tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
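
/*
 * Illustrative sketch (example only, not driver code): reply handlers such
 * as mpt_scsi_tgt_reply_handler below are reached by dispatching on the
 * handler id that was folded into each request's MsgContext (note the
 * "req->index | mpt->scsi_tgt_handler_id" construction used throughout).
 * A minimal version of that dispatch pattern, with hypothetical names:
 */
#if 0
typedef int (*example_reply_handler_t)(struct mpt_softc *, request_t *,
    uint32_t, MSG_DEFAULT_REPLY *);

static int
example_dispatch(struct mpt_softc *mpt, example_reply_handler_t *handlers,
    u_int handler_id, request_t *req, uint32_t reply_desc,
    MSG_DEFAULT_REPLY *reply_frame)
{
	/* A TRUE return means the reply has been fully consumed. */
	return ((*handlers[handler_id])(mpt, req, reply_desc, reply_frame));
}
#endif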

static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL, 0);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status,
			    (void *)&ccb->csio.sense_data,
			    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
			     ccb->csio.sense_len : 0);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);