 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 *
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif
#endif
static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;
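/*
 * Reply handler ids start out invalid; mpt_register_handler() assigns
 * the real values when the CAM personality attaches (see mpt_cam_attach).
 */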
static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
.probe = mpt_cam_probe,
.attach = mpt_cam_attach,
.enable = mpt_cam_enable,
.ready = mpt_cam_ready,
.event = mpt_cam_event,
.reset = mpt_cam_ioc_reset,
.detach = mpt_cam_detach,

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
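/*
 * Tunable semantics (consumed in mpt_set_initial_config_sas below): the
 * default of -1 leaves each SATA drive's write cache setting alone, 0
 * disables write caching, and any other value enables it; e.g. setting
 * hw.mpt.enable_sata_wc="1" in loader.conf forces it on.
 */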
mpt_cam_probe(struct mpt_softc *mpt)

 * Only attach to nodes that support the initiator or target role
 * (or want to) or have RAID physical devices that need CAM pass-thru

if (mpt->do_cfg_role) {
role = mpt->cfg_role;
if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {

mpt_cam_attach(struct mpt_softc *mpt)

struct cam_devq *devq;
mpt_handler_t handler;

TAILQ_INIT(&mpt->request_timeout_list);
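/*
 * The IOC tells us how many concurrent commands it can accept
 * (GlobalCredits); never size our request queue beyond that.
 */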
maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

handler.reply_handler = mpt_scsi_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
    &scsi_io_handler_id);
handler.reply_handler = mpt_scsi_tmf_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
    &scsi_tmf_handler_id);

 * If we're fibre channel and could support target mode, we register
 * an ELS reply handler and give it resources.
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
handler.reply_handler = mpt_fc_els_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
if (mpt_add_els_buffers(mpt) == FALSE) {
maxq -= mpt->els_cmds_allocated;

 * If we support target mode, we register a reply handler for it,
 * but don't add command resources until we actually enable target
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
handler.reply_handler = mpt_scsi_tgt_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
    &mpt->scsi_tgt_handler_id);

handler.reply_handler = mpt_sata_pass_reply_handler;
error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
    &sata_pass_handler_id);

 * We keep one request reserved for timeout TMF requests.
mpt->tmf_req = mpt_get_request(mpt, FALSE);
if (mpt->tmf_req == NULL) {
mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");

 * Mark the request as free even though not on the free list.
 * There is only one TMF request allowed to be outstanding at
 * a time and the TMF routines perform their own allocation
 * tracking using the standard state flags.
mpt->tmf_req->state = REQ_STATE_FREE;

if (mpt_spawn_recovery_thread(mpt) != 0) {
mpt_prt(mpt, "Unable to spawn recovery thread!\n");

 * The rest of this is CAM foo, for which we need to drop our lock
MPTLOCK_2_CAMLOCK(mpt);

 * Create the device queue for our SIM(s).
devq = cam_simq_alloc(maxq);
mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");

 * Construct our SIM entry.
mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
    mpt->unit, 1, maxq, devq);
if (mpt->sim == NULL) {
mpt_prt(mpt, "Unable to allocate CAM SIM!\n");

 * Register exactly this bus.
if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
mpt_prt(mpt, "Bus registration Failed!\n");

if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Path!\n");

 * Only register a second bus for RAID physical
 * devices if the controller supports RAID.
if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
CAMLOCK_2_MPTLOCK(mpt);

 * Create a "bus" to export all hidden disks to CAM.
mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
    mpt->unit, 1, maxq, devq);
if (mpt->phydisk_sim == NULL) {
mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");

if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");

if (xpt_create_path(&mpt->phydisk_path, NULL,
    cam_sim_path(mpt->phydisk_sim),
    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");

CAMLOCK_2_MPTLOCK(mpt);
mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");

CAMLOCK_2_MPTLOCK(mpt);
 * Read FC configuration information
mpt_read_config_info_fc(struct mpt_softc *mpt)

char *topology = NULL;

rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);

mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
    mpt->mpt_fcport_page0.Header.PageVersion,
    mpt->mpt_fcport_page0.Header.PageLength,
    mpt->mpt_fcport_page0.Header.PageNumber,
    mpt->mpt_fcport_page0.Header.PageType);

rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
mpt_prt(mpt, "failed to read FC Port Page 0\n");

mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

switch (mpt->mpt_fcport_page0.Flags &
    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
mpt->mpt_fcport_speed = 0;
topology = "<NO LOOP>";
case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
topology = "NL-Port";
case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
topology = "FL-Port";
mpt->mpt_fcport_speed = 0;

mpt_lprt(mpt, MPT_PRT_INFO,
    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
    "Speed %u-Gbit\n", topology,
    mpt->mpt_fcport_page0.WWNN.High,
    mpt->mpt_fcport_page0.WWNN.Low,
    mpt->mpt_fcport_page0.WWPN.High,
    mpt->mpt_fcport_page0.WWPN.Low,
    mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000

struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

snprintf(mpt->scinfo.fc.wwnn,
    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
    mpt->mpt_fcport_page0.WWNN.High,
    mpt->mpt_fcport_page0.WWNN.Low);

snprintf(mpt->scinfo.fc.wwpn,
    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
    mpt->mpt_fcport_page0.WWPN.High,
    mpt->mpt_fcport_page0.WWPN.Low);

SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
    "World Wide Node Name");

SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
    "World Wide Port Name");
 * Set FC configuration information.
mpt_set_initial_config_fc(struct mpt_softc *mpt)

CONFIG_PAGE_FC_PORT_1 fc;

r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
    &fc.Header, FALSE, 5000);
mpt_prt(mpt, "failed to read FC page 1 header\n");
return (mpt_fc_reset_link(mpt, 1));

r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
    &fc.Header, sizeof (fc), FALSE, 5000);
mpt_prt(mpt, "failed to read FC page 1\n");
return (mpt_fc_reset_link(mpt, 1));

 * Check our flags to make sure we support the role we want.

fl = le32toh(fc.Flags);
if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
role |= MPT_ROLE_INITIATOR;
if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
role |= MPT_ROLE_TARGET;

fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

if (mpt->do_cfg_role == 0) {
role = mpt->cfg_role;
mpt->do_cfg_role = 0;

if (role != mpt->cfg_role) {
if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
if ((role & MPT_ROLE_INITIATOR) == 0) {
mpt_prt(mpt, "adding initiator role\n");
fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
mpt_prt(mpt, "keeping initiator role\n");
} else if (role & MPT_ROLE_INITIATOR) {
mpt_prt(mpt, "removing initiator role\n");

if (mpt->cfg_role & MPT_ROLE_TARGET) {
if ((role & MPT_ROLE_TARGET) == 0) {
mpt_prt(mpt, "adding target role\n");
fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
mpt_prt(mpt, "keeping target role\n");
} else if (role & MPT_ROLE_TARGET) {
mpt_prt(mpt, "removing target role\n");

mpt->role = mpt->cfg_role;

if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
mpt_prt(mpt, "adding OXID option\n");
fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;

fc.Flags = htole32(fl);
r = mpt_write_cfg_page(mpt,
    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
    sizeof(fc), FALSE, 5000);
mpt_prt(mpt, "failed to update NVRAM with changes\n");
mpt_prt(mpt, "NOTE: NVRAM changes will not take "
    "effect until next reboot or IOC reset\n");
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)

ConfigExtendedPageHeader_t hdr;
struct mptsas_phyinfo *phyinfo;
SasIOUnitPage0_t *buffer;

error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
if (hdr.ExtPageLength == 0) {

len = hdr.ExtPageLength * 4;
buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {

error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
    0, &hdr, buffer, len, 0, 10000);
free(buffer, M_DEVBUF);

portinfo->num_phys = buffer->NumPhys;
portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
if (portinfo->phy_info == NULL) {
free(buffer, M_DEVBUF);

for (i = 0; i < portinfo->num_phys; i++) {
phyinfo = &portinfo->phy_info[i];
phyinfo->phy_num = i;
phyinfo->port_id = buffer->PhyData[i].Port;
phyinfo->negotiated_link_rate =
    buffer->PhyData[i].NegotiatedLinkRate;
    le16toh(buffer->PhyData[i].ControllerDevHandle);

free(buffer, M_DEVBUF);
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)

ConfigExtendedPageHeader_t hdr;
SasPhyPage0_t *buffer;

error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
if (hdr.ExtPageLength == 0) {

buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {

error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
    form + form_specific, &hdr, buffer,
    sizeof(SasPhyPage0_t), 0, 10000);
free(buffer, M_DEVBUF);

phy_info->hw_link_rate = buffer->HwLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

free(buffer, M_DEVBUF);
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)

ConfigExtendedPageHeader_t hdr;
SasDevicePage0_t *buffer;
uint64_t sas_address;

bzero(device_info, sizeof(*device_info));
error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
if (hdr.ExtPageLength == 0) {

buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
if (buffer == NULL) {

error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
    form + form_specific, &hdr, buffer,
    sizeof(SasDevicePage0_t), 0, 10000);
free(buffer, M_DEVBUF);

device_info->dev_handle = le16toh(buffer->DevHandle);
device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
device_info->slot = le16toh(buffer->Slot);
device_info->phy_num = buffer->PhyNum;
device_info->physical_port = buffer->PhysicalPort;
device_info->target_id = buffer->TargetID;
device_info->bus = buffer->Bus;
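/*
 * Copy the SAS address out with bcopy() since the 64-bit field in the
 * reply buffer may not be naturally aligned for a direct load, then
 * convert it from the IOC's little-endian layout.
 */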
bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
device_info->sas_address = le64toh(sas_address);
device_info->device_info = le32toh(buffer->DeviceInfo);

free(buffer, M_DEVBUF);
 * Read SAS configuration information.
mpt_read_config_info_sas(struct mpt_softc *mpt)

struct mptsas_portinfo *portinfo;
struct mptsas_phyinfo *phyinfo;

portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
if (portinfo == NULL)

error = mptsas_sas_io_unit_pg0(mpt, portinfo);
free(portinfo, M_DEVBUF);

for (i = 0; i < portinfo->num_phys; i++) {
phyinfo = &portinfo->phy_info[i];
error = mptsas_sas_phy_pg0(mpt, phyinfo,
    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
phyinfo->identify.phy_num = phyinfo->phy_num = i;
if (phyinfo->attached.dev_handle)
error = mptsas_sas_device_pg0(mpt,
    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
    phyinfo->attached.dev_handle);

mpt->sas_portinfo = portinfo;
return (0);
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,

SataPassthroughRequest_t *pass;

req = mpt_get_request(mpt, 0);

pass = req->req_vbuf;
bzero(pass, sizeof(SataPassthroughRequest_t));
pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
pass->TargetID = devinfo->target_id;
pass->Bus = devinfo->bus;
pass->PassthroughFlags = 0;
pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
pass->DataLength = 0;
pass->MsgContext = htole32(req->index | sata_pass_handler_id);
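/*
 * Hand-build a Register Host-to-Device FIS carrying an ATA SET FEATURES
 * command: 0x27 is the FIS type, 0x80 sets the C (command register
 * update) bit, 0xef is SET FEATURES, and the features value 0x02/0x82
 * enables/disables the drive's write cache.
 */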
pass->CommandFIS[0] = 0x27;
pass->CommandFIS[1] = 0x80;
pass->CommandFIS[2] = 0xef;
pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
pass->CommandFIS[7] = 0x40;
pass->CommandFIS[15] = 0x08;

mpt_check_doorbell(mpt);
mpt_send_cmd(mpt, req);
error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
mpt_free_request(mpt, req);
printf("error %d sending passthrough\n", error);

status = le16toh(req->IOCStatus);
if (status != MPI_IOCSTATUS_SUCCESS) {
mpt_free_request(mpt, req);
printf("IOCSTATUS %d\n", status);

mpt_free_request(mpt, req);
 * Set SAS configuration information (currently just applying the
 * hw.mpt.enable_sata_wc write cache tunable).
mpt_set_initial_config_sas(struct mpt_softc *mpt)

struct mptsas_phyinfo *phyinfo;

if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
phyinfo = &mpt->sas_portinfo->phy_info[i];
if (phyinfo->attached.dev_handle == 0)
if ((phyinfo->attached.device_info &
    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)

device_printf(mpt->dev,
    "%sabling SATA WC on phy %d\n",
    (mpt_enable_sata_wc) ? "En" : "Dis", i);
mptsas_set_sata_wc(mpt, &phyinfo->attached,
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)

if (reply_frame != NULL) {
MSG_SATA_PASSTHROUGH_REQUEST *pass;
MSG_SATA_PASSTHROUGH_REPLY *reply;

pass = (MSG_SATA_PASSTHROUGH_REQUEST *)req->req_vbuf;
reply = (MSG_SATA_PASSTHROUGH_REPLY *)reply_frame;
req->IOCStatus = le16toh(reply_frame->IOCStatus);

req->state &= ~REQ_STATE_QUEUED;
req->state |= REQ_STATE_DONE;
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
 * Whew, we can free this request (late completion)
mpt_free_request(mpt, req);
 * Read SCSI configuration information
mpt_read_config_info_spi(struct mpt_softc *mpt)

rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
    &mpt->mpt_port_page0.Header, FALSE, 5000);

mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
    mpt->mpt_port_page0.Header.PageVersion,
    mpt->mpt_port_page0.Header.PageLength,
    mpt->mpt_port_page0.Header.PageNumber,
    mpt->mpt_port_page0.Header.PageType);

rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
    &mpt->mpt_port_page1.Header, FALSE, 5000);

mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
    mpt->mpt_port_page1.Header.PageVersion,
    mpt->mpt_port_page1.Header.PageLength,
    mpt->mpt_port_page1.Header.PageNumber,
    mpt->mpt_port_page1.Header.PageType);

rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
    &mpt->mpt_port_page2.Header, FALSE, 5000);

mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
    mpt->mpt_port_page2.Header.PageVersion,
    mpt->mpt_port_page2.Header.PageLength,
    mpt->mpt_port_page2.Header.PageNumber,
    mpt->mpt_port_page2.Header.PageType);

for (i = 0; i < 16; i++) {
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
mpt_lprt(mpt, MPT_PRT_DEBUG,
    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
    mpt->mpt_dev_page0[i].Header.PageVersion,
    mpt->mpt_dev_page0[i].Header.PageLength,
    mpt->mpt_dev_page0[i].Header.PageNumber,
    mpt->mpt_dev_page0[i].Header.PageType);
rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
mpt_lprt(mpt, MPT_PRT_DEBUG,
    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
    mpt->mpt_dev_page1[i].Header.PageVersion,
    mpt->mpt_dev_page1[i].Header.PageLength,
    mpt->mpt_dev_page1[i].Header.PageNumber,
    mpt->mpt_dev_page1[i].Header.PageType);

 * At this point, we don't *have* to fail. As long as we have
 * valid config header information, we can (barely) lurch

rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
    sizeof(mpt->mpt_port_page0), FALSE, 5000);
mpt_prt(mpt, "failed to read SPI Port Page 0\n");
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
    mpt->mpt_port_page0.Capabilities,
    mpt->mpt_port_page0.PhysicalInterface);

rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
    sizeof(mpt->mpt_port_page1), FALSE, 5000);
mpt_prt(mpt, "failed to read SPI Port Page 1\n");
mpt_lprt(mpt, MPT_PRT_DEBUG,
    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
    mpt->mpt_port_page1.Configuration,
    mpt->mpt_port_page1.OnBusTimerValue);

rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
    sizeof(mpt->mpt_port_page2), FALSE, 5000);
mpt_prt(mpt, "failed to read SPI Port Page 2\n");
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    "Port Page 2: Flags %x Settings %x\n",
    mpt->mpt_port_page2.PortFlags,
    mpt->mpt_port_page2.PortSettings);
for (i = 0; i < 16; i++) {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);

for (i = 0; i < 16; i++) {
rv = mpt_read_cur_cfg_page(mpt, i,
    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
    "cannot read SPI Target %d Device Page 0\n", i);
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    "target %d page 0: Negotiated Params %x Information %x\n",
    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
    mpt->mpt_dev_page0[i].Information);
rv = mpt_read_cur_cfg_page(mpt, i,
    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
    "cannot read SPI Target %d Device Page 1\n", i);
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    "target %d page 1: Requested Params %x Configuration %x\n",
    i, mpt->mpt_dev_page1[i].RequestedParameters,
    mpt->mpt_dev_page1[i].Configuration);
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
mpt_set_initial_config_spi(struct mpt_softc *mpt)

int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
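/*
 * SPI Port Page 1 should hold our initiator ID in its low byte and the
 * one-hot bitmask of that ID in the upper 16 bits; e.g. for initiator
 * ID 7, pp1val is (0x0080 << 16) | 7 == 0x00800007.
 */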
mpt->mpt_disc_enable = 0xff;
mpt->mpt_tag_enable = 0;

if (mpt->mpt_port_page1.Configuration != pp1val) {
CONFIG_PAGE_SCSI_PORT_1 tmp;

mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
tmp = mpt->mpt_port_page1;
tmp.Configuration = pp1val;
error = mpt_write_cur_cfg_page(mpt, 0,
    &tmp.Header, sizeof(tmp), FALSE, 5000);
error = mpt_read_cur_cfg_page(mpt, 0,
    &tmp.Header, sizeof(tmp), FALSE, 5000);
if (tmp.Configuration != pp1val) {
    "failed to reset SPI Port Page 1 Config value\n");
mpt->mpt_port_page1 = tmp;

 * The purpose of this exercise is to get
 * all targets back to async/narrow.
 *
 * We skip this step if the BIOS has already negotiated
 * speeds with the targets and does not require us to
 * do Domain Validation.
i = mpt->mpt_port_page2.PortSettings &
    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
j = mpt->mpt_port_page2.PortFlags &
    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
    "honoring BIOS transfer negotiations\n");
for (i = 0; i < 16; i++) {
mpt->mpt_dev_page1[i].RequestedParameters = 0;
mpt->mpt_dev_page1[i].Configuration = 0;
(void) mpt_update_spi_config(mpt, i);
mpt_cam_enable(struct mpt_softc *mpt)

if (mpt_read_config_info_fc(mpt)) {
if (mpt_set_initial_config_fc(mpt)) {
} else if (mpt->is_sas) {
if (mpt_read_config_info_sas(mpt)) {
if (mpt_set_initial_config_sas(mpt)) {
} else if (mpt->is_spi) {
if (mpt_read_config_info_spi(mpt)) {
if (mpt_set_initial_config_spi(mpt)) {

mpt_cam_ready(struct mpt_softc *mpt)

 * If we're in target mode, hang out resources now
 * so we don't cause the world to hang talking to us.
if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
 * Try to add some target command resources
if (mpt_add_target_commands(mpt) == FALSE) {
mpt_prt(mpt, "failed to add target commands\n");

mpt_cam_detach(struct mpt_softc *mpt)

mpt_handler_t handler;

mpt_terminate_recovery_thread(mpt);

handler.reply_handler = mpt_scsi_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
    scsi_io_handler_id);
handler.reply_handler = mpt_scsi_tmf_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
    scsi_tmf_handler_id);
handler.reply_handler = mpt_fc_els_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
handler.reply_handler = mpt_scsi_tgt_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
    mpt->scsi_tgt_handler_id);
handler.reply_handler = mpt_sata_pass_reply_handler;
mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
    sata_pass_handler_id);

if (mpt->tmf_req != NULL) {
mpt->tmf_req->state = REQ_STATE_ALLOCATED;
mpt_free_request(mpt, mpt->tmf_req);
mpt->tmf_req = NULL;

if (mpt->sas_portinfo != NULL) {
free(mpt->sas_portinfo, M_DEVBUF);
mpt->sas_portinfo = NULL;

if (mpt->sim != NULL) {
MPTLOCK_2_CAMLOCK(mpt);
xpt_free_path(mpt->path);
xpt_bus_deregister(cam_sim_path(mpt->sim));
cam_sim_free(mpt->sim, TRUE);
CAMLOCK_2_MPTLOCK(mpt);

if (mpt->phydisk_sim != NULL) {
MPTLOCK_2_CAMLOCK(mpt);
xpt_free_path(mpt->phydisk_path);
xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
cam_sim_free(mpt->phydisk_sim, TRUE);
mpt->phydisk_sim = NULL;
CAMLOCK_2_MPTLOCK(mpt);
/* This routine is used after a system crash to dump core onto the swap device.
mpt_poll(struct cam_sim *sim)

struct mpt_softc *mpt;

mpt = (struct mpt_softc *)cam_sim_softc(sim);

 * Watchdog timeout routine for SCSI requests.
mpt_timeout(void *arg)

struct mpt_softc *mpt;

ccb = (union ccb *)arg;
mpt = ccb->ccb_h.ccb_mpt_ptr;

req = ccb->ccb_h.ccb_req_ptr;
mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
req->state |= REQ_STATE_TIMEDOUT;
mpt_wakeup_recovery_thread(mpt);
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for a SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)

request_t *req, *trq;
struct mpt_softc *mpt;
uint32_t flags, nxt_off;
MSG_REQUEST_HEADER *hdrp;

req = (request_t *)arg;
mpt = ccb->ccb_h.ccb_mpt_ptr;
req = ccb->ccb_h.ccb_req_ptr;

hdrp = req->req_vbuf;
mpt_off = req->req_vbuf;

if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {

switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
case MPI_FUNCTION_TARGET_ASSIST:
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",

if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
mpt_prt(mpt, "segment count %d too large (max %u)\n",
    nseg, mpt->max_seg_cnt);

if (error != EFBIG && error != ENOMEM) {
mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
mpt_freeze_ccb(ccb);
if (error == EFBIG) {
status = CAM_REQ_TOO_BIG;
} else if (error == ENOMEM) {
if (mpt->outofbeer == 0) {
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG,
status = CAM_REQUEUE_REQ;
status = CAM_REQ_CMP_ERR;
mpt_set_ccb_status(ccb, status);
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
CAMLOCK_2_MPTLOCK(mpt);
mpt_free_request(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);

 * No data to transfer?
 * Just make a single simple SGL with zero length.

if (mpt->verbose >= MPT_PRT_DEBUG) {
int tidx = ((char *)sglp) - mpt_off;
memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);

SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
MPI_pSGE_SET_FLAGS(se1,
    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
se1->FlagsLength = htole32(se1->FlagsLength);

flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
op = BUS_DMASYNC_PREWRITE;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREWRITE;
op = BUS_DMASYNC_PREREAD;
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);

 * Okay, fill in what we can at the end of the command frame.
 * If we have up to MPT_NSGL_FIRST, we can fit them all into
 * the command frame.
 *
 * Otherwise, we fill up through MPT_NSGL_FIRST less one
 * SIMPLE64 pointers and start doing CHAIN64 entries after
 * that.
if (nseg < MPT_NSGL_FIRST(mpt)) {
 * Leave room for CHAIN element
first_lim = MPT_NSGL_FIRST(mpt) - 1;

se = (SGE_SIMPLE64 *) sglp;
for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
memset(se, 0, sizeof (*se));
se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
if (sizeof(bus_addr_t) > 4) {
se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
if (seg == first_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
    MPI_SGE_FLAGS_END_OF_BUFFER;
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);

 * Tell the IOC where to find the first chain element.
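 * The ChainOffset field is expressed in 32-bit words, which is why the
 * byte difference is shifted right by two below.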
hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
nxt_off = MPT_RQSL(mpt);

 * Make up the rest of the data segments out of a chain element
 * (contained in the current request frame) which points to
 * SIMPLE64 elements in the next request frame, possibly ending
 * with *another* chain element (if there's more).
while (seg < nseg) {
uint32_t tf, cur_off;
bus_addr_t chain_list_addr;

 * Point to the chain descriptor. Note that the chain
 * descriptor is at the end of the *previous* list (whether
ce = (SGE_CHAIN64 *) se;

 * Before we change our current pointer, make sure we won't
 * overflow the request area with this frame. Note that we
 * test against 'greater than' here as it's okay in this case
 * to have next offset be just outside the request area.
if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
nxt_off = MPT_REQUEST_AREA;

 * Set our SGE element pointer to the beginning of the chain
 * list and update our next chain list offset.
se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
nxt_off += MPT_RQSL(mpt);

 * Now initialize the chain descriptor.
memset(ce, 0, sizeof (*ce));

 * Get the physical address of the chain list.
chain_list_addr = trq->req_pbuf;
chain_list_addr += cur_off;
if (sizeof (bus_addr_t) > 4) {
ce->Address.High =
    htole32((uint32_t) ((uint64_t)chain_list_addr >> 32));
ce->Address.Low = htole32((uint32_t) chain_list_addr);
ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

 * If we have more than a frame's worth of segments left,
 * set up the chain list to have the last element be another
 * chain descriptor.
if ((nseg - seg) > MPT_NSGL(mpt)) {
this_seg_lim = seg + MPT_NSGL(mpt) - 1;
 * The length of the chain is the length in bytes of the
 * number of segments plus the next chain element.
 *
 * The next chain descriptor offset is the length,
 * in 32-bit words, of the number of segments.
ce->Length = (this_seg_lim - seg) *
    sizeof (SGE_SIMPLE64);
ce->NextChainOffset = ce->Length >> 2;
ce->Length += sizeof (SGE_CHAIN64);
this_seg_lim = nseg;
ce->Length = (this_seg_lim - seg) *
    sizeof (SGE_SIMPLE64);

 * Fill in the chain list SGE elements with our segment data.
 *
 * If we're the last element in this chain list, set the last
 * element flag. If we're the completely last element period,
 * set the end of list and end of buffer flags.
while (seg < this_seg_lim) {
memset(se, 0, sizeof (*se));
se->Address.Low = htole32(dm_segs->ds_addr);
if (sizeof (bus_addr_t) > 4) {
se->Address.High =
    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
if (seg == this_seg_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
    MPI_SGE_FLAGS_END_OF_BUFFER;
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);

 * If we have more segments to do and we've used up all of
 * the space in a request area, go allocate another one
 * and chain to that.
if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
CAMLOCK_2_MPTLOCK(mpt);
nrq = mpt_get_request(mpt, FALSE);
MPTLOCK_2_CAMLOCK(mpt);

 * Append the new request area on the tail of our list.
if ((trq = req->chain) == NULL) {
while (trq->chain != NULL) {
mpt_off = trq->req_vbuf;
if (mpt->verbose >= MPT_PRT_DEBUG) {
memset(mpt_off, 0xff, MPT_REQUEST_AREA);

 * One last check to see whether this CCB needs to be aborted.
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
    ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
CAMLOCK_2_MPTLOCK(mpt);
mpt_free_request(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);

ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
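/*
 * Convert the CCB's timeout from milliseconds to clock ticks for
 * the old-style timeout(9) interface.
 */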
ccb->ccb_h.timeout_ch =
    timeout(mpt_timeout, (caddr_t)ccb,
    (ccb->ccb_h.timeout * hz) / 1000);
callout_handle_init(&ccb->ccb_h.timeout_ch);

if (mpt->verbose > MPT_PRT_DEBUG) {
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
printf(" Additional Chain Area %d\n", nc++);
mpt_dump_sgl(trq->req_vbuf, 0);

if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
tgt->state = TGT_STATE_MOVING_DATA;
#else
tgt->state = TGT_STATE_MOVING_DATA;
#endif

CAMLOCK_2_MPTLOCK(mpt);
mpt_send_cmd(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)

request_t *req, *trq;
struct mpt_softc *mpt;
uint32_t flags, nxt_off;
MSG_REQUEST_HEADER *hdrp;

req = (request_t *)arg;
mpt = ccb->ccb_h.ccb_mpt_ptr;
req = ccb->ccb_h.ccb_req_ptr;

hdrp = req->req_vbuf;
mpt_off = req->req_vbuf;

if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {

switch (hdrp->Function) {
case MPI_FUNCTION_SCSI_IO_REQUEST:
case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
case MPI_FUNCTION_TARGET_ASSIST:
sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",

if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
mpt_prt(mpt, "segment count %d too large (max %u)\n",
    nseg, mpt->max_seg_cnt);

if (error != EFBIG && error != ENOMEM) {
mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
mpt_freeze_ccb(ccb);
if (error == EFBIG) {
status = CAM_REQ_TOO_BIG;
} else if (error == ENOMEM) {
if (mpt->outofbeer == 0) {
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG,
status = CAM_REQUEUE_REQ;
status = CAM_REQ_CMP_ERR;
mpt_set_ccb_status(ccb, status);
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
CAMLOCK_2_MPTLOCK(mpt);
mpt_free_request(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);

 * No data to transfer?
 * Just make a single simple SGL with zero length.

if (mpt->verbose >= MPT_PRT_DEBUG) {
int tidx = ((char *)sglp) - mpt_off;
memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);

SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
MPI_pSGE_SET_FLAGS(se1,
    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
se1->FlagsLength = htole32(se1->FlagsLength);

flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREREAD;
op = BUS_DMASYNC_PREWRITE;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
op = BUS_DMASYNC_PREWRITE;
op = BUS_DMASYNC_PREREAD;
bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);

 * Okay, fill in what we can at the end of the command frame.
 * If we have up to MPT_NSGL_FIRST, we can fit them all into
 * the command frame.
 *
 * Otherwise, we fill up through MPT_NSGL_FIRST less one
 * SIMPLE32 pointers and start doing CHAIN32 entries after
 * that.
if (nseg < MPT_NSGL_FIRST(mpt)) {
 * Leave room for CHAIN element
first_lim = MPT_NSGL_FIRST(mpt) - 1;

se = (SGE_SIMPLE32 *) sglp;
for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
memset(se, 0, sizeof (*se));
se->Address = dm_segs->ds_addr;
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
if (seg == first_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
    MPI_SGE_FLAGS_END_OF_BUFFER;
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);

 * Tell the IOC where to find the first chain element.
 * As above, the offset is expressed in 32-bit words.
hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
nxt_off = MPT_RQSL(mpt);

 * Make up the rest of the data segments out of a chain element
 * (contained in the current request frame) which points to
 * SIMPLE32 elements in the next request frame, possibly ending
 * with *another* chain element (if there's more).
while (seg < nseg) {
uint32_t tf, cur_off;
bus_addr_t chain_list_addr;

 * Point to the chain descriptor. Note that the chain
 * descriptor is at the end of the *previous* list (whether
ce = (SGE_CHAIN32 *) se;

 * Before we change our current pointer, make sure we won't
 * overflow the request area with this frame. Note that we
 * test against 'greater than' here as it's okay in this case
 * to have next offset be just outside the request area.
if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
nxt_off = MPT_REQUEST_AREA;

 * Set our SGE element pointer to the beginning of the chain
 * list and update our next chain list offset.
se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
nxt_off += MPT_RQSL(mpt);

 * Now initialize the chain descriptor.
memset(ce, 0, sizeof (*ce));

 * Get the physical address of the chain list.
chain_list_addr = trq->req_pbuf;
chain_list_addr += cur_off;

ce->Address = chain_list_addr;
ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

 * If we have more than a frame's worth of segments left,
 * set up the chain list to have the last element be another
 * chain descriptor.
if ((nseg - seg) > MPT_NSGL(mpt)) {
this_seg_lim = seg + MPT_NSGL(mpt) - 1;
 * The length of the chain is the length in bytes of the
 * number of segments plus the next chain element.
 *
 * The next chain descriptor offset is the length,
 * in 32-bit words, of the number of segments.
ce->Length = (this_seg_lim - seg) *
    sizeof (SGE_SIMPLE32);
ce->NextChainOffset = ce->Length >> 2;
ce->Length += sizeof (SGE_CHAIN32);
this_seg_lim = nseg;
ce->Length = (this_seg_lim - seg) *
    sizeof (SGE_SIMPLE32);

 * Fill in the chain list SGE elements with our segment data.
 *
 * If we're the last element in this chain list, set the last
 * element flag. If we're the completely last element period,
 * set the end of list and end of buffer flags.
while (seg < this_seg_lim) {
memset(se, 0, sizeof (*se));
se->Address = dm_segs->ds_addr;
MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
if (seg == this_seg_lim - 1) {
tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
if (seg == nseg - 1) {
tf |= MPI_SGE_FLAGS_END_OF_LIST |
    MPI_SGE_FLAGS_END_OF_BUFFER;
MPI_pSGE_SET_FLAGS(se, tf);
se->FlagsLength = htole32(se->FlagsLength);

 * If we have more segments to do and we've used up all of
 * the space in a request area, go allocate another one
 * and chain to that.
if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
CAMLOCK_2_MPTLOCK(mpt);
nrq = mpt_get_request(mpt, FALSE);
MPTLOCK_2_CAMLOCK(mpt);

 * Append the new request area on the tail of our list.
if ((trq = req->chain) == NULL) {
while (trq->chain != NULL) {
mpt_off = trq->req_vbuf;
if (mpt->verbose >= MPT_PRT_DEBUG) {
memset(mpt_off, 0xff, MPT_REQUEST_AREA);

 * One last check to see whether this CCB needs to be aborted.
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req =
    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
    ccb->ccb_h.status & CAM_STATUS_MASK);
if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
CAMLOCK_2_MPTLOCK(mpt);
mpt_free_request(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);

ccb->ccb_h.status |= CAM_SIM_QUEUED;
if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
ccb->ccb_h.timeout_ch =
    timeout(mpt_timeout, (caddr_t)ccb,
    (ccb->ccb_h.timeout * hz) / 1000);
callout_handle_init(&ccb->ccb_h.timeout_ch);

if (mpt->verbose > MPT_PRT_DEBUG) {
mpt_print_request(req->req_vbuf);
for (trq = req->chain; trq; trq = trq->chain) {
printf(" Additional Chain Area %d\n", nc++);
mpt_dump_sgl(trq->req_vbuf, 0);

if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
tgt->state = TGT_STATE_MOVING_DATA;
#else
tgt->state = TGT_STATE_MOVING_DATA;
#endif

CAMLOCK_2_MPTLOCK(mpt);
mpt_send_cmd(mpt, req);
MPTLOCK_2_CAMLOCK(mpt);
mpt_start(struct cam_sim *sim, union ccb *ccb)

struct mpt_softc *mpt;
MSG_SCSI_IO_REQUEST *mpt_req;
struct ccb_scsiio *csio = &ccb->csio;
struct ccb_hdr *ccbh = &ccb->ccb_h;
bus_dmamap_callback_t *cb;

/* Get the pointer for the physical adapter */
mpt = ccb->ccb_h.ccb_mpt_ptr;
raid_passthru = (sim == mpt->phydisk_sim);

CAMLOCK_2_MPTLOCK(mpt);
if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
if (mpt->outofbeer == 0) {
xpt_freeze_simq(mpt->sim, 1);
mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
MPTLOCK_2_CAMLOCK(mpt);

mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);

MPTLOCK_2_CAMLOCK(mpt);
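/*
 * Pick the S/G list construction callback to match the platform's DMA
 * address width: 64-bit platforms build SGE_SIMPLE64/SGE_CHAIN64
 * elements, 32-bit platforms the 32-bit variants.
 */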
if (sizeof (bus_addr_t) > 4) {
cb = mpt_execute_req_a64;
cb = mpt_execute_req;

 * Link the ccb and the request structure so we can find
 * the other knowing either the request or the ccb
ccb->ccb_h.ccb_req_ptr = req;

/* Now we build the command for the IOC */
mpt_req = req->req_vbuf;
memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
if (raid_passthru) {
mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
CAMLOCK_2_MPTLOCK(mpt);
if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
MPTLOCK_2_CAMLOCK(mpt);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
MPTLOCK_2_CAMLOCK(mpt);
mpt_req->Bus = 0; /* we never set bus here */
tgt = ccb->ccb_h.target_id;
mpt_req->Bus = 0; /* XXX */

mpt_req->SenseBufferLength =
    (csio->sense_len < MPT_SENSE_SIZE) ?
    csio->sense_len : MPT_SENSE_SIZE;

 * We use the message context to find the request structure when
 * we get the command completion interrupt from the IOC.
mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

/* Which physical device to do the I/O on */
mpt_req->TargetID = tgt;

/* We assume a single level LUN type */
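/*
 * LUNs below 256 use peripheral device addressing (byte 0 left zero);
 * anything larger is encoded with SAM-2 flat space addressing, which
 * is what the 0x40 in the upper bits of byte 0 selects.
 */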
if (ccb->ccb_h.target_lun >= 256) {
mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
mpt_req->LUN[1] = ccb->ccb_h.target_lun;

/* Set the direction of the transfer */
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
switch(ccb->csio.tag_action) {
case MSG_HEAD_OF_Q_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
case MSG_ORDERED_Q_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
case MSG_SIMPLE_Q_TAG:
mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
if (mpt->is_fc || mpt->is_sas) {
mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
/* XXX No such thing for a target doing packetized. */
mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

/* Copy the scsi command block into place */
if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);

mpt_req->CDBLength = csio->cdb_len;
mpt_req->DataLength = htole32(csio->dxfer_len);
mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

 * Do a *short* print here if we're set to MPT_PRT_DEBUG
if (mpt->verbose == MPT_PRT_DEBUG) {
mpt_prt(mpt, "mpt_start: %s op 0x%x ",
    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
mpt_prtc(mpt, "(%s %u byte%s ",
    (df == MPI_SCSIIO_CONTROL_READ)?
    "read" : "write", csio->dxfer_len,
    (csio->dxfer_len == 1)? ")" : "s)");
mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
    ccb->ccb_h.target_lun, req, req->serno);

 * If we have any data to send with this command map it into bus space.
if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
 * We've been given a pointer to a single buffer.
if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
 * Virtual address that needs to be translated into
 * one or more physical address ranges.
2229 int s = splsoftvm();
2230 error = bus_dmamap_load(mpt->buffer_dmat,
2231 req->dmap, csio->data_ptr, csio->dxfer_len,
2234 if (error == EINPROGRESS) {
2236 * So as to maintain ordering,
2237 * freeze the controller queue
2238 * until our mapping is
2241 xpt_freeze_simq(mpt->sim, 1);
2242 ccbh->status |= CAM_RELEASE_SIMQ;
2246 * We have been given a pointer to a single
2249 struct bus_dma_segment seg;
2251 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2252 seg.ds_len = csio->dxfer_len;
2253 (*cb)(req, &seg, 1, 0);
2257 * We have been given a list of addresses.
2258 * This case could easily be supported, but such lists are not
2259 * currently generated by the CAM subsystem, so there is
2260 * no point in wasting time on it right now.
2262 struct bus_dma_segment *segs;
2263 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2264 (*cb)(req, NULL, 0, EFAULT);
2266 /* Just use the segments provided */
2267 segs = (struct bus_dma_segment *)csio->data_ptr;
2268 (*cb)(req, segs, csio->sglist_cnt, 0);
2272 (*cb)(req, NULL, 0, 0);
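/*
 * In each of the cases above the callback (mpt_execute_req or its
 * 64-bit sibling, chosen earlier) is what actually builds the SGL
 * and hands the request to the IOC; it runs either synchronously
 * from this context or, in the EINPROGRESS deferred-load case,
 * later from the busdma layer.
 */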
2277 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2284 error = mpt_scsi_send_tmf(mpt,
2285 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2286 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2287 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2288 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2289 0, /* XXX How do I get the channel ID? */
2290 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2291 lun != CAM_LUN_WILDCARD ? lun : 0,
2296 * mpt_scsi_send_tmf hard resets on failure, so no
2297 * need to do so here.
2300 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2304 /* Wait for bus reset to be processed by the IOC. */
2305 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2306 REQ_STATE_DONE, sleep_ok, 5000);
2308 status = mpt->tmf_req->IOCStatus;
2309 response = mpt->tmf_req->ResponseCode;
2310 mpt->tmf_req->state = REQ_STATE_FREE;
2313 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2314 "Resetting controller.\n");
2315 mpt_reset(mpt, TRUE);
2319 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2320 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2321 "Resetting controller.\n", status);
2322 mpt_reset(mpt, TRUE);
2326 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2327 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2328 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2329 "Resetting controller.\n", response);
2330 mpt_reset(mpt, TRUE);
2337 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2341 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2343 req = mpt_get_request(mpt, FALSE);
2348 memset(fc, 0, sizeof(*fc));
2349 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2350 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2351 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2352 mpt_send_cmd(mpt, req);
2354 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2355 REQ_STATE_DONE, FALSE, 60 * 1000);
2357 mpt_free_request(mpt, req);
2364 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2365 MSG_EVENT_NOTIFY_REPLY *msg)
2367 uint32_t data0, data1;
2369 data0 = le32toh(msg->Data[0]);
2370 data1 = le32toh(msg->Data[1]);
2371 switch(msg->Event & 0xFF) {
2372 case MPI_EVENT_UNIT_ATTENTION:
2373 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2374 (data0 >> 8) & 0xff, data0 & 0xff);
2377 case MPI_EVENT_IOC_BUS_RESET:
2378 /* We generated a bus reset */
2379 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2380 (data0 >> 8) & 0xff);
2381 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2384 case MPI_EVENT_EXT_BUS_RESET:
2385 /* Someone else generated a bus reset */
2386 mpt_prt(mpt, "External Bus Reset Detected\n");
2388 * These replies don't return EventData like the MPI
2391 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2394 case MPI_EVENT_RESCAN:
2395 #if __FreeBSD_version >= 600000
2400 * In general this means a device has been added to the loop.
2402 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2403 if (mpt->ready == 0) {
2406 if (mpt->phydisk_sim) {
2407 pathid = cam_sim_path(mpt->phydisk_sim);
2409 pathid = cam_sim_path(mpt->sim);
2411 MPTLOCK_2_CAMLOCK(mpt);
2413 * Allocate a CCB, create a wildcard path for this bus,
2414 * and schedule a rescan.
2416 ccb = xpt_alloc_ccb_nowait();
2418 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2419 CAMLOCK_2_MPTLOCK(mpt);
2423 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2424 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2425 CAMLOCK_2_MPTLOCK(mpt);
2426 mpt_prt(mpt, "unable to create path for rescan\n");
2431 CAMLOCK_2_MPTLOCK(mpt);
2435 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2438 case MPI_EVENT_LINK_STATUS_CHANGE:
2439 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2440 (data1 >> 8) & 0xff,
2441 ((data0 & 0xff) == 0)? "Failed" : "Active");
2444 case MPI_EVENT_LOOP_STATE_CHANGE:
2445 switch ((data0 >> 16) & 0xff) {
2448 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2449 "(Loop Initialization)\n",
2450 (data1 >> 8) & 0xff,
2451 (data0 >> 8) & 0xff,
2453 switch ((data0 >> 8) & 0xff) {
2455 if ((data0 & 0xff) == 0xF7) {
2456 mpt_prt(mpt, "Device needs AL_PA\n");
2458 mpt_prt(mpt, "Device %02x doesn't like "
2464 if ((data0 & 0xff) == 0xF7) {
2465 mpt_prt(mpt, "Device had loop failure "
2466 "at its receiver prior to acquiring"
2469 mpt_prt(mpt, "Device %02x detected loop"
2470 " failure at its receiver\n",
2475 mpt_prt(mpt, "Device %02x requests that device "
2476 "%02x reset itself\n",
2478 (data0 >> 8) & 0xFF);
2483 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2484 "LPE(%02x,%02x) (Loop Port Enable)\n",
2485 (data1 >> 8) & 0xff, /* Port */
2486 (data0 >> 8) & 0xff, /* Character 3 */
2487 (data0 ) & 0xff /* Character 4 */);
2490 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2491 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2492 (data1 >> 8) & 0xff, /* Port */
2493 (data0 >> 8) & 0xff, /* Character 3 */
2494 (data0 ) & 0xff /* Character 4 */);
2497 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2498 "FC event (%02x %02x %02x)\n",
2499 (data1 >> 8) & 0xff, /* Port */
2500 (data0 >> 16) & 0xff, /* Event */
2501 (data0 >> 8) & 0xff, /* Character 3 */
2502 (data0 ) & 0xff /* Character 4 */);
2506 case MPI_EVENT_LOGOUT:
2507 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2508 (data1 >> 8) & 0xff, data0);
2510 case MPI_EVENT_QUEUE_FULL:
2512 struct cam_sim *sim;
2513 struct cam_path *tmppath;
2514 struct ccb_relsim crs;
2515 PTR_EVENT_DATA_QUEUE_FULL pqf =
2516 (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2519 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2520 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2521 if (mpt->phydisk_sim) {
2522 sim = mpt->phydisk_sim;
2526 MPTLOCK_2_CAMLOCK(mpt);
2527 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2528 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2529 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2530 mpt_prt(mpt, "unable to create a path to send "
2532 CAMLOCK_2_MPTLOCK(mpt);
2535 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2536 crs.ccb_h.func_code = XPT_REL_SIMQ;
2537 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2538 crs.openings = pqf->CurrentDepth - 1;
2539 xpt_action((union ccb *)&crs);
2540 if (crs.ccb_h.status != CAM_REQ_CMP) {
2541 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2543 xpt_free_path(tmppath);
2545 CAMLOCK_2_MPTLOCK(mpt);
2548 case MPI_EVENT_EVENT_CHANGE:
2549 case MPI_EVENT_INTEGRATED_RAID:
2550 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2551 case MPI_EVENT_SAS_SES:
2554 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2562 * Reply path for all SCSI I/O requests, called from our
2563 * interrupt handler by extracting our handler index from
2564 * the MsgContext field of the reply from the IOC.
2566 * This routine is optimized for the common case of a
2567 * completion without error. All exception handling is
2568 * offloaded to non-inlined helper routines to minimize
2572 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2573 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2575 MSG_SCSI_IO_REQUEST *scsi_req;
2579 if (req->state == REQ_STATE_FREE) {
2580 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2584 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2587 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2592 tgt = scsi_req->TargetID;
2593 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2594 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2596 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2597 bus_dmasync_op_t op;
2599 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2600 op = BUS_DMASYNC_POSTREAD;
2602 op = BUS_DMASYNC_POSTWRITE;
2603 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2604 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2607 if (reply_frame == NULL) {
2609 * Context only reply, completion without error status.
2611 ccb->csio.resid = 0;
2612 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2613 ccb->csio.scsi_status = SCSI_STATUS_OK;
2615 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2618 if (mpt->outofbeer) {
2619 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2621 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2623 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2624 struct scsi_inquiry_data *iq =
2625 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2626 if (scsi_req->Function ==
2627 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2629 * Fake out the device type so that only the
2630 * pass-thru device will attach.
2632 iq->device &= ~0x1F;
2633 iq->device |= T_NODEVICE;
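/*
 * Forcing the peripheral device type bits to T_NODEVICE keeps the
 * regular disk driver from attaching to RAID member disks seen
 * through the passthru bus; only the pass-thru device claims them.
 */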
2636 if (mpt->verbose == MPT_PRT_DEBUG) {
2637 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2640 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2641 MPTLOCK_2_CAMLOCK(mpt);
2643 CAMLOCK_2_MPTLOCK(mpt);
2644 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2645 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2647 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2649 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2651 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2652 ("CCB req needed wakeup"));
2654 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2656 mpt_free_request(mpt, req);
2661 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2662 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2664 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2666 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2668 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2670 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2671 /* Record IOC Status and Response Code of TMF for any waiters. */
2672 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2673 req->ResponseCode = tmf_reply->ResponseCode;
2675 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2676 req, req->serno, le16toh(tmf_reply->IOCStatus));
2677 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2678 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2679 req->state |= REQ_STATE_DONE;
2682 mpt->tmf_req->state = REQ_STATE_FREE;
2688 * XXX: Move to definitions file
2706 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2707 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2710 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2711 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2714 * We are going to reuse the ELS request to send this response back.
2717 memset(rsp, 0, sizeof(*rsp));
2719 #ifdef USE_IMMEDIATE_LINK_DATA
2721 * Apparently the IMMEDIATE stuff doesn't seem to work.
2723 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2725 rsp->RspLength = length;
2726 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2727 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2730 * Copy over information from the original reply frame to
2731 * its correct place in the response.
2733 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2736 * And now copy back the temporary area to the original frame.
2738 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2739 rsp = req->req_vbuf;
2741 #ifdef USE_IMMEDIATE_LINK_DATA
2742 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2745 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2746 bus_addr_t paddr = req->req_pbuf;
2747 paddr += MPT_RQSL(mpt);
2750 MPI_SGE_FLAGS_HOST_TO_IOC |
2751 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2752 MPI_SGE_FLAGS_LAST_ELEMENT |
2753 MPI_SGE_FLAGS_END_OF_LIST |
2754 MPI_SGE_FLAGS_END_OF_BUFFER;
2755 fl <<= MPI_SGE_FLAGS_SHIFT;
2757 se->FlagsLength = htole32(fl);
2758 se->Address = htole32((uint32_t) paddr);
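/*
 * SGE_SIMPLE32 packs its flag bits into the top byte of FlagsLength
 * (MPI_SGE_FLAGS_SHIFT is 24) with a 24-bit byte count below; e.g.
 * for a 20 byte ELS response the low 24 bits carry 20 and the high
 * byte carries the HOST_TO_IOC/SIMPLE/LAST/EOL/EOB flags set above.
 */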
2765 mpt_send_cmd(mpt, req);
2769 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2770 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2772 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2773 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2777 U16 status = le16toh(reply_frame->IOCStatus);
2780 int do_refresh = TRUE;
2783 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2784 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2785 req, req->serno, rp->Function));
2786 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2787 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2789 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2792 mpt_lprt(mpt, MPT_PRT_DEBUG,
2793 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2794 req, req->serno, reply_frame, reply_frame->Function);
2796 if (status != MPI_IOCSTATUS_SUCCESS) {
2797 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2798 status, reply_frame->Function);
2799 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2801 * XXX: to get around shutdown issue
2810 * If this is the completion of a link service response we sent,
2811 * we recycle it as a refresh for a new link service request.
2813 * The request pointer is bogus in this case and we have to fetch
2814 * it based upon the TransactionContext.
2816 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2817 /* Freddie Uncle Charlie Katie */
2818 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2819 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2820 if (mpt->els_cmd_ptrs[ioindex] == req) {
2824 KASSERT(ioindex < mpt->els_cmds_allocated,
2825 ("can't find my mommie!"));
2827 /* remove from active list as we're going to re-post it */
2828 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2829 req->state &= ~REQ_STATE_QUEUED;
2830 req->state |= REQ_STATE_DONE;
2831 mpt_fc_post_els(mpt, req, ioindex);
2835 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2836 /* remove from active list as we're done */
2837 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2838 req->state &= ~REQ_STATE_QUEUED;
2839 req->state |= REQ_STATE_DONE;
2840 if (req->state & REQ_STATE_TIMEDOUT) {
2841 mpt_lprt(mpt, MPT_PRT_DEBUG,
2842 "Sync Primitive Send Completed After Timeout\n");
2843 mpt_free_request(mpt, req);
2844 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2845 mpt_lprt(mpt, MPT_PRT_DEBUG,
2846 "Async Primitive Send Complete\n");
2847 mpt_free_request(mpt, req);
2849 mpt_lprt(mpt, MPT_PRT_DEBUG,
2850 "Sync Primitive Send Complete- Waking Waiter\n");
2856 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2857 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2858 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2859 rp->MsgLength, rp->MsgFlags);
2863 if (rp->MsgLength <= 5) {
2865 * This is just an ack of an original ELS buffer post
2867 mpt_lprt(mpt, MPT_PRT_DEBUG,
2868 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2873 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2874 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2876 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2877 cmd = be32toh(elsbuf[0]) >> 24;
2879 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2880 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2884 ioindex = le32toh(rp->TransactionContext);
2885 req = mpt->els_cmd_ptrs[ioindex];
2887 if (rctl == ELS && type == 1) {
2891 * Send back a PRLI ACC
2893 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2894 le32toh(rp->Wwn.PortNameHigh),
2895 le32toh(rp->Wwn.PortNameLow));
2896 elsbuf[0] = htobe32(0x02100014);
2897 elsbuf[1] |= htobe32(0x00000100);
2898 elsbuf[4] = htobe32(0x00000002);
2899 if (mpt->role & MPT_ROLE_TARGET)
2900 elsbuf[4] |= htobe32(0x00000010);
2901 if (mpt->role & MPT_ROLE_INITIATOR)
2902 elsbuf[4] |= htobe32(0x00000020);
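/*
 * The magic numbers follow the FC-LS PRLI accept payload layout:
 * 0x02100014 is the LS_ACC command code with a 0x10 page length and
 * 20 byte payload length, and the bits OR'd into word 4 advertise
 * FCP target and/or initiator function to match our configured role.
 */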
2903 /* remove from active list as we're done */
2904 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2905 req->state &= ~REQ_STATE_QUEUED;
2906 req->state |= REQ_STATE_DONE;
2907 mpt_fc_els_send_response(mpt, req, rp, 20);
2911 memset(elsbuf, 0, 5 * (sizeof (U32)));
2912 elsbuf[0] = htobe32(0x02100014);
2913 elsbuf[1] = htobe32(0x08000100);
2914 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2915 le32toh(rp->Wwn.PortNameHigh),
2916 le32toh(rp->Wwn.PortNameLow));
2917 /* remove from active list as we're done */
2918 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2919 req->state &= ~REQ_STATE_QUEUED;
2920 req->state |= REQ_STATE_DONE;
2921 mpt_fc_els_send_response(mpt, req, rp, 20);
2925 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2928 } else if (rctl == ABTS && type == 0) {
2929 uint16_t rx_id = le16toh(rp->Rxid);
2930 uint16_t ox_id = le16toh(rp->Oxid);
2931 request_t *tgt_req = NULL;
2934 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2935 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2936 le32toh(rp->Wwn.PortNameLow));
2937 if (rx_id >= mpt->mpt_max_tgtcmds) {
2938 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2939 } else if (mpt->tgt_cmd_ptrs == NULL) {
2940 mpt_prt(mpt, "No TGT CMD PTRS\n");
2942 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2945 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2947 union ccb *ccb = tgt->ccb;
2950 vbuf = tgt_req->req_vbuf;
2951 vbuf += MPT_RQSL(mpt);
2954 * Check to make sure we have the correct command.
2955 * The reply descriptor in the target state should
2956 * contain an IoIndex that matches the
2959 * It'd be nice to have OX_ID to crosscheck with
2962 ct_id = GET_IO_INDEX(tgt->reply_desc);
2964 if (ct_id != rx_id) {
2965 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2966 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2974 "CCB (%p): lun %u flags %x status %x\n",
2975 ccb, ccb->ccb_h.target_lun,
2976 ccb->ccb_h.flags, ccb->ccb_h.status);
2978 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2979 "%x nxfers %x\n", tgt->state,
2980 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2983 if (mpt_abort_target_cmd(mpt, tgt_req)) {
2984 mpt_prt(mpt, "unable to start TargetAbort\n");
2987 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2989 memset(elsbuf, 0, 5 * (sizeof (U32)));
2990 elsbuf[0] = htobe32(0);
2991 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2992 elsbuf[2] = htobe32(0x0000ffff);
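/*
 * Minimal BA_ACC payload: word 1 echoes the aborter's OX_ID/RX_ID
 * pair and word 2 declares the accepted SEQ_CNT range, 0x0000
 * through 0xffff.
 */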
2994 * Dork with the reply frame so that the response to it
2997 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2998 /* remove from active list as we're done */
2999 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3000 req->state &= ~REQ_STATE_QUEUED;
3001 req->state |= REQ_STATE_DONE;
3002 mpt_fc_els_send_response(mpt, req, rp, 12);
3005 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3007 if (do_refresh == TRUE) {
3008 /* remove from active list as we're done */
3009 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3010 req->state &= ~REQ_STATE_QUEUED;
3011 req->state |= REQ_STATE_DONE;
3012 mpt_fc_post_els(mpt, req, ioindex);
3018 * Clean up all SCSI Initiator personality state in response
3019 * to a controller reset.
3022 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3025 * The pending list is already run down by
3026 * the generic handler. Perform the same
3027 * operation on the timed out request list.
3029 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3030 MPI_IOCSTATUS_INVALID_STATE);
3033 * XXX: We need to repost ELS and Target Command Buffers?
3037 * Inform the XPT that a bus reset has occurred.
3039 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3043 * Parse additional completion information in the reply
3044 * frame for SCSI I/O requests.
3047 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3048 MSG_DEFAULT_REPLY *reply_frame)
3051 MSG_SCSI_IO_REPLY *scsi_io_reply;
3056 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3057 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3058 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3059 ("MPT SCSI I/O Handler called with incorrect reply type"));
3060 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3061 ("MPT SCSI I/O Handler called with continuation reply"));
3063 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3064 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3065 loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
3066 ioc_status &= MPI_IOCSTATUS_MASK;
3067 sstate = scsi_io_reply->SCSIState;
3071 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3073 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3074 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3075 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3076 ccb->csio.sense_resid =
3077 ccb->csio.sense_len - scsi_io_reply->SenseCount;
3078 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3079 min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
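/*
 * The IOC DMA'd the sense bytes into the per-request sense buffer
 * whose physical address we supplied in SenseBufferLowAddr at
 * submit time, so autosense is just a copy out of req->sense_vbuf
 * plus a residual computation.
 */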
3082 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3084 * Tag messages rejected, but non-tagged retry
3087 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3091 switch(ioc_status) {
3092 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3095 * Linux driver indicates that a zero
3096 * transfer length with this error code
3097 * indicates a CRC error.
3099 * No need to swap the bytes for checking
3102 if (scsi_io_reply->TransferCount == 0) {
3103 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3107 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3108 case MPI_IOCSTATUS_SUCCESS:
3109 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3110 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3112 * Status was never returned for this transaction.
3114 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3115 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3116 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3117 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3118 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3119 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3120 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3122 /* XXX Handle SPI-Packet and FCP-2 response info. */
3123 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3125 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3127 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3128 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3130 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3131 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3133 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3135 * Since selection timeouts and "device really not
3136 * there" are grouped into this error code, report
3137 * selection timeout. Selection timeouts are
3138 * typically retried before giving up on the device
3139 * whereas "device not there" errors are considered
3142 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3144 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3145 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3147 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3148 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3150 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3151 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3153 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3154 ccb->ccb_h.status = CAM_UA_TERMIO;
3156 case MPI_IOCSTATUS_INVALID_STATE:
3158 * The IOC has been reset. Emulate a bus reset.
3161 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3162 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3164 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3165 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3167 * Don't clobber any timeout status that has
3168 * already been set for this transaction. We
3169 * want the SCSI layer to be able to differentiate
3170 * between the command we aborted due to timeout
3171 * and any innocent bystanders.
3173 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3175 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3178 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3179 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3181 case MPI_IOCSTATUS_BUSY:
3182 mpt_set_ccb_status(ccb, CAM_BUSY);
3184 case MPI_IOCSTATUS_INVALID_FUNCTION:
3185 case MPI_IOCSTATUS_INVALID_SGL:
3186 case MPI_IOCSTATUS_INTERNAL_ERROR:
3187 case MPI_IOCSTATUS_INVALID_FIELD:
3190 * Some of the above may need to kick
3191 * off a recovery action!!!!
3193 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3197 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3198 mpt_freeze_ccb(ccb);
3205 mpt_action(struct cam_sim *sim, union ccb *ccb)
3207 struct mpt_softc *mpt;
3208 struct ccb_trans_settings *cts;
3213 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3215 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3216 KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
3217 raid_passthru = (sim == mpt->phydisk_sim);
3219 tgt = ccb->ccb_h.target_id;
3220 lun = ccb->ccb_h.target_lun;
3221 if (raid_passthru &&
3222 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3223 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3224 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3225 CAMLOCK_2_MPTLOCK(mpt);
3226 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3227 MPTLOCK_2_CAMLOCK(mpt);
3228 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3229 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3233 MPTLOCK_2_CAMLOCK(mpt);
3235 ccb->ccb_h.ccb_mpt_ptr = mpt;
3237 switch (ccb->ccb_h.func_code) {
3238 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3240 * Do a couple of preliminary checks...
3242 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3243 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3244 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3245 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3249 /* Max supported CDB length is 16 bytes */
3250 /* XXX Unless we implement the new 32byte message type */
3251 if (ccb->csio.cdb_len >
3252 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3253 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3254 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3257 #ifdef MPT_TEST_MULTIPATH
3258 if (mpt->failure_id == ccb->ccb_h.target_id) {
3259 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3260 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3264 ccb->csio.scsi_status = SCSI_STATUS_OK;
3265 mpt_start(sim, ccb);
3269 if (raid_passthru) {
3270 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3271 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3275 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3277 xpt_print(ccb->ccb_h.path, "reset bus\n");
3280 xpt_print(ccb->ccb_h.path, "reset device\n");
3282 CAMLOCK_2_MPTLOCK(mpt);
3283 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3284 MPTLOCK_2_CAMLOCK(mpt);
3287 * mpt_bus_reset is always successful in that it
3288 * will fall back to a hard reset should a bus
3289 * reset attempt fail.
3291 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3292 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3297 union ccb *accb = ccb->cab.abort_ccb;
3298 CAMLOCK_2_MPTLOCK(mpt);
3299 switch (accb->ccb_h.func_code) {
3300 case XPT_ACCEPT_TARGET_IO:
3301 case XPT_IMMED_NOTIFY:
3302 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3304 case XPT_CONT_TARGET_IO:
3305 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3306 ccb->ccb_h.status = CAM_UA_ABORT;
3309 ccb->ccb_h.status = CAM_UA_ABORT;
3312 ccb->ccb_h.status = CAM_REQ_INVALID;
3315 MPTLOCK_2_CAMLOCK(mpt);
3319 #ifdef CAM_NEW_TRAN_CODE
3320 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3322 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3324 #define DP_DISC_ENABLE 0x1
3325 #define DP_DISC_DISABL 0x2
3326 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3328 #define DP_TQING_ENABLE 0x4
3329 #define DP_TQING_DISABL 0x8
3330 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3332 #define DP_WIDE 0x10
3333 #define DP_NARROW 0x20
3334 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3336 #define DP_SYNC 0x40
3338 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3340 #ifdef CAM_NEW_TRAN_CODE
3341 struct ccb_trans_settings_scsi *scsi;
3342 struct ccb_trans_settings_spi *spi;
3351 if (mpt->is_fc || mpt->is_sas) {
3352 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3356 #ifdef CAM_NEW_TRAN_CODE
3357 scsi = &cts->proto_specific.scsi;
3358 spi = &cts->xport_specific.spi;
3361 * We can be called just to valid transport and proto versions
3363 if (scsi->valid == 0 && spi->valid == 0) {
3364 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3370 * Skip attempting settings on RAID volume disks.
3371 * Other devices on the bus get the normal treatment.
3373 if (mpt->phydisk_sim && raid_passthru == 0 &&
3374 mpt_is_raid_volume(mpt, tgt) != 0) {
3375 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3376 "no transfer settings for RAID vols\n");
3377 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3381 i = mpt->mpt_port_page2.PortSettings &
3382 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3383 j = mpt->mpt_port_page2.PortFlags &
3384 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3385 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3386 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3387 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3388 "honoring BIOS transfer negotiations\n");
3389 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3397 #ifndef CAM_NEW_TRAN_CODE
3398 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3399 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3400 DP_DISC_ENABLE : DP_DISC_DISABL;
3403 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3404 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3405 DP_TQING_ENABLE : DP_TQING_DISABL;
3408 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3409 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3412 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3413 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3415 period = cts->sync_period;
3416 offset = cts->sync_offset;
3419 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3420 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3421 DP_DISC_ENABLE : DP_DISC_DISABL;
3424 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3425 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3426 DP_TQING_ENABLE : DP_TQING_DISABL;
3429 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3430 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3431 DP_WIDE : DP_NARROW;
3434 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3436 offset = spi->sync_offset;
3438 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3439 &mpt->mpt_dev_page1[tgt];
3440 offset = ptr->RequestedParameters;
3441 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3442 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3444 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3446 period = spi->sync_period;
3448 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3449 &mpt->mpt_dev_page1[tgt];
3450 period = ptr->RequestedParameters;
3451 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3452 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3455 CAMLOCK_2_MPTLOCK(mpt);
3456 if (dval & DP_DISC_ENABLE) {
3457 mpt->mpt_disc_enable |= (1 << tgt);
3458 } else if (dval & DP_DISC_DISABL) {
3459 mpt->mpt_disc_enable &= ~(1 << tgt);
3461 if (dval & DP_TQING_ENABLE) {
3462 mpt->mpt_tag_enable |= (1 << tgt);
3463 } else if (dval & DP_TQING_DISABL) {
3464 mpt->mpt_tag_enable &= ~(1 << tgt);
3466 if (dval & DP_WIDTH) {
3467 mpt_setwidth(mpt, tgt, 1);
3469 if (dval & DP_SYNC) {
3470 mpt_setsync(mpt, tgt, period, offset);
3473 MPTLOCK_2_CAMLOCK(mpt);
3474 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3477 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3478 "set [%d]: 0x%x period 0x%x offset %d\n",
3479 tgt, dval, period, offset);
3480 if (mpt_update_spi_config(mpt, tgt)) {
3481 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3483 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3485 MPTLOCK_2_CAMLOCK(mpt);
3488 case XPT_GET_TRAN_SETTINGS:
3490 #ifdef CAM_NEW_TRAN_CODE
3491 struct ccb_trans_settings_scsi *scsi;
3493 cts->protocol = PROTO_SCSI;
3495 struct ccb_trans_settings_fc *fc =
3496 &cts->xport_specific.fc;
3497 cts->protocol_version = SCSI_REV_SPC;
3498 cts->transport = XPORT_FC;
3499 cts->transport_version = 0;
3500 fc->valid = CTS_FC_VALID_SPEED;
3501 fc->bitrate = 100000;
3502 } else if (mpt->is_sas) {
3503 struct ccb_trans_settings_sas *sas =
3504 &cts->xport_specific.sas;
3505 cts->protocol_version = SCSI_REV_SPC2;
3506 cts->transport = XPORT_SAS;
3507 cts->transport_version = 0;
3508 sas->valid = CTS_SAS_VALID_SPEED;
3509 sas->bitrate = 300000;
3511 cts->protocol_version = SCSI_REV_2;
3512 cts->transport = XPORT_SPI;
3513 cts->transport_version = 2;
3514 if (mpt_get_spi_settings(mpt, cts) != 0) {
3515 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3519 scsi = &cts->proto_specific.scsi;
3520 scsi->valid = CTS_SCSI_VALID_TQ;
3521 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3525 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3526 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3527 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3528 } else if (mpt->is_sas) {
3529 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3530 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3531 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3532 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3533 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3537 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3540 case XPT_CALC_GEOMETRY:
3542 struct ccb_calc_geometry *ccg;
3545 if (ccg->block_size == 0) {
3546 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3547 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3550 mpt_calc_geometry(ccg, /*extended*/1);
3551 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3554 case XPT_PATH_INQ: /* Path routing inquiry */
3556 struct ccb_pathinq *cpi = &ccb->cpi;
3558 cpi->version_num = 1;
3559 cpi->target_sprt = 0;
3560 cpi->hba_eng_cnt = 0;
3561 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3563 * FC cards report MAX_DEVICES of 512, but
3564 * the MSG_SCSI_IO_REQUEST target id field
3565 * is only 8 bits. Until we fix the driver
3566 * to support 'channels' for bus overflow,
3569 if (cpi->max_target > 255) {
3570 cpi->max_target = 255;
3574 * VMware ESX reports > 16 devices and then dies when we probe.
3576 if (mpt->is_spi && cpi->max_target > 15) {
3577 cpi->max_target = 15;
3580 cpi->initiator_id = mpt->mpt_ini_id;
3581 cpi->bus_id = cam_sim_bus(sim);
3584 * The base speed is the speed of the underlying connection.
3586 #ifdef CAM_NEW_TRAN_CODE
3587 cpi->protocol = PROTO_SCSI;
3589 cpi->hba_misc = PIM_NOBUSRESET;
3590 cpi->base_transfer_speed = 100000;
3591 cpi->hba_inquiry = PI_TAG_ABLE;
3592 cpi->transport = XPORT_FC;
3593 cpi->transport_version = 0;
3594 cpi->protocol_version = SCSI_REV_SPC;
3595 } else if (mpt->is_sas) {
3596 cpi->hba_misc = PIM_NOBUSRESET;
3597 cpi->base_transfer_speed = 300000;
3598 cpi->hba_inquiry = PI_TAG_ABLE;
3599 cpi->transport = XPORT_SAS;
3600 cpi->transport_version = 0;
3601 cpi->protocol_version = SCSI_REV_SPC2;
3603 cpi->hba_misc = PIM_SEQSCAN;
3604 cpi->base_transfer_speed = 3300;
3605 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3606 cpi->transport = XPORT_SPI;
3607 cpi->transport_version = 2;
3608 cpi->protocol_version = SCSI_REV_2;
3612 cpi->hba_misc = PIM_NOBUSRESET;
3613 cpi->base_transfer_speed = 100000;
3614 cpi->hba_inquiry = PI_TAG_ABLE;
3615 } else if (mpt->is_sas) {
3616 cpi->hba_misc = PIM_NOBUSRESET;
3617 cpi->base_transfer_speed = 300000;
3618 cpi->hba_inquiry = PI_TAG_ABLE;
3620 cpi->hba_misc = PIM_SEQSCAN;
3621 cpi->base_transfer_speed = 3300;
3622 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3627 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3628 * wide and restrict it to one lun.
3630 if (raid_passthru) {
3631 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3632 cpi->initiator_id = cpi->max_target + 1;
3636 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3637 cpi->hba_misc |= PIM_NOINITIATOR;
3639 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3641 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3643 cpi->target_sprt = 0;
3645 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3646 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3647 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3648 cpi->unit_number = cam_sim_unit(sim);
3649 cpi->ccb_h.status = CAM_REQ_CMP;
3652 case XPT_EN_LUN: /* Enable LUN as a target */
3656 CAMLOCK_2_MPTLOCK(mpt);
3657 if (ccb->cel.enable)
3658 result = mpt_enable_lun(mpt,
3659 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3661 result = mpt_disable_lun(mpt,
3662 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3663 MPTLOCK_2_CAMLOCK(mpt);
3665 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3667 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3671 case XPT_NOTIFY_ACK: /* recycle notify ack */
3672 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3673 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3675 tgt_resource_t *trtp;
3676 lun_id_t lun = ccb->ccb_h.target_lun;
3677 ccb->ccb_h.sim_priv.entries[0].field = 0;
3678 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3679 ccb->ccb_h.flags = 0;
3681 if (lun == CAM_LUN_WILDCARD) {
3682 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3683 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3686 trtp = &mpt->trt_wildcard;
3687 } else if (lun >= MPT_MAX_LUNS) {
3688 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3691 trtp = &mpt->trt[lun];
3693 CAMLOCK_2_MPTLOCK(mpt);
3694 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3695 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3696 "Put FREE ATIO %p lun %d\n", ccb, lun);
3697 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3699 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3700 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3701 "Put FREE INOT lun %d\n", lun);
3702 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3705 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3707 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3708 MPTLOCK_2_CAMLOCK(mpt);
3711 case XPT_CONT_TARGET_IO:
3712 CAMLOCK_2_MPTLOCK(mpt);
3713 mpt_target_start_io(mpt, ccb);
3714 MPTLOCK_2_CAMLOCK(mpt);
3718 ccb->ccb_h.status = CAM_REQ_INVALID;
3725 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3727 #ifdef CAM_NEW_TRAN_CODE
3728 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3729 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3732 uint32_t dval, pval, oval;
3735 if (IS_CURRENT_SETTINGS(cts) == 0) {
3736 tgt = cts->ccb_h.target_id;
3737 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3738 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3742 tgt = cts->ccb_h.target_id;
3746 * We aren't looking at Port Page 2 BIOS settings here;
3747 * sometimes these have been known to be bogus. XXX
3749 * For user settings, we pick the max from port page 0
3751 * For current settings we read the current settings out from
3752 * device page 0 for that target.
3754 if (IS_CURRENT_SETTINGS(cts)) {
3755 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3758 CAMLOCK_2_MPTLOCK(mpt);
3759 tmp = mpt->mpt_dev_page0[tgt];
3760 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3761 sizeof(tmp), FALSE, 5000);
3763 MPTLOCK_2_CAMLOCK(mpt);
3764 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3767 MPTLOCK_2_CAMLOCK(mpt);
3768 mpt_lprt(mpt, MPT_PRT_DEBUG,
3769 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3770 tmp.NegotiatedParameters, tmp.Information);
3771 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3772 DP_WIDE : DP_NARROW;
3773 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3774 DP_DISC_ENABLE : DP_DISC_DISABL;
3775 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3776 DP_TQING_ENABLE : DP_TQING_DISABL;
3777 oval = tmp.NegotiatedParameters;
3778 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3779 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3780 pval = tmp.NegotiatedParameters;
3781 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3782 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3783 mpt->mpt_dev_page0[tgt] = tmp;
3785 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3786 oval = mpt->mpt_port_page0.Capabilities;
3787 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3788 pval = mpt->mpt_port_page0.Capabilities;
3789 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3792 #ifndef CAM_NEW_TRAN_CODE
3793 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3795 cts->sync_period = pval;
3796 cts->sync_offset = oval;
3797 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3798 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3799 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3800 if (dval & DP_WIDE) {
3801 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3803 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3805 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3806 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3807 if (dval & DP_DISC_ENABLE) {
3808 cts->flags |= CCB_TRANS_DISC_ENB;
3810 if (dval & DP_TQING_ENABLE) {
3811 cts->flags |= CCB_TRANS_TAG_ENB;
3819 spi->sync_offset = oval;
3820 spi->sync_period = pval;
3821 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3822 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3823 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3824 if (dval & DP_WIDE) {
3825 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3827 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3829 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3830 scsi->valid = CTS_SCSI_VALID_TQ;
3831 if (dval & DP_TQING_ENABLE) {
3832 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3834 spi->valid |= CTS_SPI_VALID_DISC;
3835 if (dval & DP_DISC_ENABLE) {
3836 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3840 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3841 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3842 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3847 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3849 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3851 ptr = &mpt->mpt_dev_page1[tgt];
3853 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3855 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3860 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3862 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3864 ptr = &mpt->mpt_dev_page1[tgt];
3865 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3866 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3867 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3868 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3869 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3873 ptr->RequestedParameters |=
3874 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3875 ptr->RequestedParameters |=
3876 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3878 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3881 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3882 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
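/*
 * DT (dual transition clocking), QAS (quick arbitration) and IU
 * (information units) are only meaningful at the faster transfer
 * periods, so they get ORed back into RequestedParameters only
 * when the requested sync period is short enough to warrant them.
 */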
3887 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3889 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3892 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3893 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3894 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3895 tmp = mpt->mpt_dev_page1[tgt];
3896 rv = mpt_write_cur_cfg_page(mpt, tgt,
3897 &tmp.Header, sizeof(tmp), FALSE, 5000);
3899 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3906 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3908 #if __FreeBSD_version >= 500000
3909 cam_calc_geometry(ccg, extended);
3912 uint32_t secs_per_cylinder;
3914 if (ccg->block_size == 0) {
3915 ccg->ccb_h.status = CAM_REQ_INVALID;
3918 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3919 if (size_mb > 1024 && extended) {
3921 ccg->secs_per_track = 63;
3924 ccg->secs_per_track = 32;
3926 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3927 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3928 ccg->ccb_h.status = CAM_REQ_CMP;
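/*
 * A worked example of the pre-5.x fallback above, assuming the
 * usual extended translation values: a 4GB volume of 512 byte
 * blocks gives size_mb = 4096 > 1024, so with 63 sectors per track
 * (and the customary 255 heads) cylinders becomes
 * volume_size / (255 * 63).
 */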
3932 /****************************** Timeout Recovery ******************************/
3934 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3938 error = mpt_kthread_create(mpt_recovery_thread, mpt,
3939 &mpt->recovery_thread, /*flags*/0,
3940 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3945 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3947 if (mpt->recovery_thread == NULL) {
3950 mpt->shutdwn_recovery = 1;
3953 * Sleep on a slightly different location
3954 * for this interlock just for added safety.
3956 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3960 mpt_recovery_thread(void *arg)
3962 struct mpt_softc *mpt;
3964 #if __FreeBSD_version >= 500000
3967 mpt = (struct mpt_softc *)arg;
3970 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3971 if (mpt->shutdwn_recovery == 0) {
3972 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3975 if (mpt->shutdwn_recovery != 0) {
3978 mpt_recover_commands(mpt);
3980 mpt->recovery_thread = NULL;
3981 wakeup(&mpt->recovery_thread);
3983 #if __FreeBSD_version >= 500000
3990 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3991 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3993 MSG_SCSI_TASK_MGMT *tmf_req;
3997 * Wait for any current TMF request to complete.
3998 * We're only allowed to issue one TMF at a time.
4000 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4001 sleep_ok, MPT_TMF_MAX_TIMEOUT);
4003 mpt_reset(mpt, TRUE);
4007 mpt_assign_serno(mpt, mpt->tmf_req);
4008 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4010 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4011 memset(tmf_req, 0, sizeof(*tmf_req));
4012 tmf_req->TargetID = target;
4013 tmf_req->Bus = channel;
4014 tmf_req->ChainOffset = 0;
4015 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4016 tmf_req->Reserved = 0;
4017 tmf_req->TaskType = type;
4018 tmf_req->Reserved1 = 0;
4019 tmf_req->MsgFlags = flags;
4020 tmf_req->MsgContext =
4021 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4022 memset(&tmf_req->LUN, 0,
4023 sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
4025 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4026 tmf_req->LUN[1] = lun & 0xff;
4028 tmf_req->LUN[1] = lun;
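/*
 * This is the same single level LUN encoding used on the I/O path
 * in mpt_start: large LUNs get the 0x40 flat space marker in byte
 * 0, small ones live entirely in byte 1.
 */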
4030 tmf_req->TaskMsgContext = abort_ctx;
4032 mpt_lprt(mpt, MPT_PRT_DEBUG,
4033 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4034 mpt->tmf_req->serno, tmf_req->MsgContext);
4035 if (mpt->verbose > MPT_PRT_DEBUG) {
4036 mpt_print_request(tmf_req);
4039 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4040 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4041 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4042 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4043 if (error != MPT_OK) {
4044 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4045 mpt->tmf_req->state = REQ_STATE_FREE;
4046 mpt_reset(mpt, TRUE);
4052 * When a command times out, it is placed on the request_timeout_list
4053 * and we wake our recovery thread. The MPT-Fusion architecture supports
4054 * only a single TMF operation at a time, so we serially abort/bdr, etc,
4055 * the timedout transactions. The next TMF is issued either by the
4056 * completion handler of the current TMF waking our recovery thread,
4057 * or the TMF timeout handler causing a hard reset sequence.
4060 mpt_recover_commands(struct mpt_softc *mpt)
4066 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4068 * No work to do- leave.
4070 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4075 * Flush any commands whose completion coincides with their timeout.
4079 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4081 * The timedout commands have already
4082 * completed. This typically means
4083 * that either the timeout value was on
4084 * the hairy edge of what the device
4085 * requires or - more likely - interrupts
4086 * are not happening.
4088 mpt_prt(mpt, "Timedout requests already complete. "
4089 "Interrupts may not be functioning.\n");
4090 mpt_enable_ints(mpt);
4095 * We have no visibility into the current state of the
4096 * controller, so attempt to abort the commands in the
4097 * order they timed-out. For initiator commands, we
4098 * depend on the reply handler pulling requests off
4101 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4104 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4106 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4107 req, req->serno, hdrp->Function);
4110 mpt_prt(mpt, "null ccb in timed out request. "
4111 "Resetting Controller.\n");
4112 mpt_reset(mpt, TRUE);
4115 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4118 * Check to see if this is not an initiator command and
4119 * deal with it differently if it is.
4121 switch (hdrp->Function) {
4122 case MPI_FUNCTION_SCSI_IO_REQUEST:
4123 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4127 * XXX: FIX ME: need to abort target assists...
4129 mpt_prt(mpt, "just putting it back on the pend q\n");
4130 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4131 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4136 error = mpt_scsi_send_tmf(mpt,
4137 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4138 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4139 htole32(req->index | scsi_io_handler_id), TRUE);
4143 * mpt_scsi_send_tmf hard resets on failure, so no
4144 * need to do so here. Our queue should be emptied
4145 * by the hard reset.
4150 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4151 REQ_STATE_DONE, TRUE, 500);
4153 status = mpt->tmf_req->IOCStatus;
4154 response = mpt->tmf_req->ResponseCode;
4155 mpt->tmf_req->state = REQ_STATE_FREE;
4159 * If we've errored out, reset the controller.
4161 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4162 "Resetting controller\n");
4163 mpt_reset(mpt, TRUE);
4167 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4168 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4169 "Resetting controller.\n", status);
4170 mpt_reset(mpt, TRUE);
4174 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4175 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4176 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4177 "Resetting controller.\n", response);
4178 mpt_reset(mpt, TRUE);
4181 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4185 /************************ Target Mode Support ****************************/
4187 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4189 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4190 PTR_SGE_TRANSACTION32 tep;
4191 PTR_SGE_SIMPLE32 se;
4195 paddr = req->req_pbuf;
4196 paddr += MPT_RQSL(mpt);
4199 memset(fc, 0, MPT_REQUEST_AREA);
4200 fc->BufferCount = 1;
4201 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4202 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4205 * Okay, set up ELS buffer pointers. ELS buffer pointers
4206 * consist of a TE SGL element (with details length of zero)
4207 * followed by a SIMPLE SGL element which holds the address
4211 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4213 tep->ContextSize = 4;
4215 tep->TransactionContext[0] = htole32(ioindex);
4217 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4219 MPI_SGE_FLAGS_HOST_TO_IOC |
4220 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4221 MPI_SGE_FLAGS_LAST_ELEMENT |
4222 MPI_SGE_FLAGS_END_OF_LIST |
4223 MPI_SGE_FLAGS_END_OF_BUFFER;
4224 fl <<= MPI_SGE_FLAGS_SHIFT;
4225 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4226 se->FlagsLength = htole32(fl);
4227 se->Address = htole32((uint32_t) paddr);
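/*
 * The buffer we post is simply the tail of this request's own DMA
 * area: everything past the request frame (MPT_NRFM minus MPT_RQSL
 * bytes, starting MPT_RQSL past req_pbuf) is handed to the IOC for
 * an incoming ELS frame to land in.
 */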
4228 mpt_lprt(mpt, MPT_PRT_DEBUG,
4229 "add ELS index %d ioindex %d for %p:%u\n",
4230 req->index, ioindex, req, req->serno);
4231 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4232 ("mpt_fc_post_els: request not locked"));
4233 mpt_send_cmd(mpt, req);
4237 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4239 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4240 PTR_CMD_BUFFER_DESCRIPTOR cb;
4243 paddr = req->req_pbuf;
4244 paddr += MPT_RQSL(mpt);
4245 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4246 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4249 fc->BufferCount = 1;
4250 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4251 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4253 cb = &fc->Buffer[0];
4254 cb->IoIndex = htole16(ioindex);
4255 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4257 mpt_check_doorbell(mpt);
4258 mpt_send_cmd(mpt, req);
4262 mpt_add_els_buffers(struct mpt_softc *mpt)
4266 if (mpt->is_fc == 0) {
4270 if (mpt->els_cmds_allocated) {
4274 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4275 M_DEVBUF, M_NOWAIT | M_ZERO);
4277 if (mpt->els_cmd_ptrs == NULL) {
4282 * Feed the chip some ELS buffer resources
4284 for (i = 0; i < MPT_MAX_ELS; i++) {
4285 request_t *req = mpt_get_request(mpt, FALSE);
4289 req->state |= REQ_STATE_LOCKED;
4290 mpt->els_cmd_ptrs[i] = req;
4291 mpt_fc_post_els(mpt, req, i);
4295 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4296 free(mpt->els_cmd_ptrs, M_DEVBUF);
4297 mpt->els_cmd_ptrs = NULL;
4300 if (i != MPT_MAX_ELS) {
4301 mpt_lprt(mpt, MPT_PRT_INFO,
4302 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4304 mpt->els_cmds_allocated = i;
4309 mpt_add_target_commands(struct mpt_softc *mpt)
4313 if (mpt->tgt_cmd_ptrs) {
4317 max = MPT_MAX_REQUESTS(mpt) >> 1;
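/*
 * Cap posted target command buffers at half the request pool,
 * presumably so initiator I/O, TMFs and target assists can still
 * obtain requests while the buffers are outstanding.
 */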
4318 if (max > mpt->mpt_max_tgtcmds) {
4319 max = mpt->mpt_max_tgtcmds;
4322 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4323 if (mpt->tgt_cmd_ptrs == NULL) {
4325 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4329 for (i = 0; i < max; i++) {
4332 req = mpt_get_request(mpt, FALSE);
4336 req->state |= REQ_STATE_LOCKED;
4337 mpt->tgt_cmd_ptrs[i] = req;
4338 mpt_post_target_command(mpt, req, i);
4343 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4344 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4345 mpt->tgt_cmd_ptrs = NULL;
4349 mpt->tgt_cmds_allocated = i;
4352 mpt_lprt(mpt, MPT_PRT_INFO,
4353 "added %d of %d target bufs\n", i, max);
4359 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4361 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4363 } else if (lun >= MPT_MAX_LUNS) {
4365 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4368 if (mpt->tenabled == 0) {
4370 (void) mpt_fc_reset_link(mpt, 0);
4374 if (lun == CAM_LUN_WILDCARD) {
4375 mpt->trt_wildcard.enabled = 1;
4377 mpt->trt[lun].enabled = 1;
4383 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4386 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4388 } else if (lun >= MPT_MAX_LUNS) {
4390 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4393 if (lun == CAM_LUN_WILDCARD) {
4394 mpt->trt_wildcard.enabled = 0;
4396 mpt->trt[lun].enabled = 0;
4398 for (i = 0; i < MPT_MAX_LUNS; i++) {
4399 if (mpt->trt[i].enabled) {
4403 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4405 (void) mpt_fc_reset_link(mpt, 0);
4413 * Called with MPT lock held
4416 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4418 struct ccb_scsiio *csio = &ccb->csio;
4419 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4420 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4422 switch (tgt->state) {
4423 case TGT_STATE_IN_CAM:
4425 case TGT_STATE_MOVING_DATA:
4426 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4427 xpt_freeze_simq(mpt->sim, 1);
4428 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4429 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4430 MPTLOCK_2_CAMLOCK(mpt);
4432 CAMLOCK_2_MPTLOCK(mpt);
4435 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4436 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4437 mpt_tgt_dump_req_state(mpt, cmd_req);
4438 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4439 MPTLOCK_2_CAMLOCK(mpt);
4441 CAMLOCK_2_MPTLOCK(mpt);
4445 if (csio->dxfer_len) {
4446 bus_dmamap_callback_t *cb;
4447 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4450 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4451 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4453 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4454 if (mpt->outofbeer == 0) {
4456 xpt_freeze_simq(mpt->sim, 1);
4457 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4459 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4460 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4461 MPTLOCK_2_CAMLOCK(mpt);
4463 CAMLOCK_2_MPTLOCK(mpt);
4466 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4467 if (sizeof (bus_addr_t) > 4) {
4468 cb = mpt_execute_req_a64;
4470 cb = mpt_execute_req;
4474 ccb->ccb_h.ccb_req_ptr = req;
4477 * Record the currently active ccb and the
4478 * request for it in our target state area.
4483 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4487 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4489 ta->QueueTag = ssp->InitiatorTag;
4490 } else if (mpt->is_spi) {
4491 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4493 ta->QueueTag = sp->Tag;
4495 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4496 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4497 ta->ReplyWord = htole32(tgt->reply_desc);
4498 if (csio->ccb_h.target_lun >= 256) {
4500 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4501 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4503 ta->LUN[1] = csio->ccb_h.target_lun;

		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

		MPTLOCK_2_CAMLOCK(mpt);
		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;
				int s = splsoftvm();
				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				splx(s);
				if (error == EINPROGRESS) {
					xpt_freeze_simq(mpt->sim, 1);
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr = (bus_addr_t)
				    (vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but it is not
			 * currently generated by the CAM subsystem, so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *sgs;
			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				sgs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, sgs, csio->sglist_cnt, 0);
			}
		}
		CAMLOCK_2_MPTLOCK(mpt);
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}

static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;
	request_t *req;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun >= 256) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}

/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}

/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
	}
	return (error);
}

/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but without the RESID fields marked valid). This causes
 * QLogic initiators to suspect that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */
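/*
 * Purely illustrative (not part of the driver): if one chose to
 * trust the firmware behavior described above, either knob would
 * be turned on at build time, for example from the module Makefile:
 *
 *	CFLAGS+= -DWE_TRUST_AUTO_GOOD_STATUS
 *
 * Neither is defined by default, for the reasons given above.
 */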

static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
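		/*
		 * Reader's note on the word layout used below (derived
		 * from the code itself, not from the MPI headers):
		 * rsp[2] carries the SCSI status plus the residual and
		 * sense validity bits, rsp[3] the residual count,
		 * rsp[4] the sense length, and rsp[8] onward the sense
		 * data, each word byte-swapped to big-endian.
		 */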
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
			MPI_SGE_FLAGS_HOST_TO_IOC	|
			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
			MPI_SGE_FLAGS_LAST_ELEMENT	|
			MPI_SGE_FLAGS_END_OF_LIST	|
			MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
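		/*
		 * Reader's note: FlagsLength packs the SGE flags into
		 * the high bits (MPI_SGE_FLAGS_SHIFT) and the byte count
		 * into the low bits, so with resplen equal to
		 * sizeof (MPI_TARGET_FCP_RSP_BUFFER) this word describes
		 * a single host-to-IOC element covering the FC_RSP
		 * payload staged at paddr.
		 */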
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BUSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->message_args[0] = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->message_args[0] = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->message_args[0] = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->message_args[0] = MSG_ABORT_TAG;
		break;
	default:
		inot->message_args[0] = MSG_NOOP;
		break;
	}

	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)inot);
	CAMLOCK_2_MPTLOCK(mpt);
}

static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	    'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
	    'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
	    'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
	    '0', '0', '0', '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 itag;
	U16 ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * First, DMA sync the received command -
	 * which is in the *request* phys area.
	 *
	 * XXX: We could optimize this for a range
	 */
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
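	/*
	 * Reader's note on the XXX above: bus_dmamap_sync(9) on FreeBSD
	 * takes no offset/length arguments, so the entire request area
	 * is synced here; limiting the sync to just this command's
	 * buffer would require a separate map per command buffer.
	 */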

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queueing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}

	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n");
		lun = 0xffff;
		break;
	}
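	/*
	 * Worked example of the decode above (a reader's note, not
	 * driver logic): flat-address bytes 0x41 0x23 yield lun 0x123;
	 * peripheral-style bytes 0x00 0x05 yield lun 5; any other
	 * addressing method falls to the default case and is rejected
	 * below as out of range.
	 */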

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;		/* current error, fixed format */
			buf[2] = 0x5;		/* ILLEGAL REQUEST */
			buf[7] = 0x8;		/* additional sense length */
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
			{
				if (cdbp[1] != 0) {
					buf[12] = 0x24;	/* inv. field in CDB */
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			}
			case REQUEST_SENSE:
			{
				buf[2] = 0x0;	/* NO SENSE */
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			}
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x20;	/* invalid command opcode */
				break;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;	/* lun not supported */
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}

	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u - sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
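	/*
	 * Reader's note: the tag round-trips. When the CCB comes back
	 * from CAM, mpt_target_start_io() recovers this same command
	 * buffer request with MPT_TAG_2_REQ(mpt, csio->tag_id), the
	 * inverse of the MPT_MAKE_TAGID() construction above.
	 */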
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;
		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1)) ? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)atiop);
	CAMLOCK_2_MPTLOCK(mpt);
}

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}

static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch(tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d\n", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				untimeout(mpt_timeout, ccb,
				    ccb->ccb_h.timeout_ch);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d\n", __LINE__));
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}
	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}