2 * Copyright (c) 1997-2009 by Matthew Jacob
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 #include <dev/isp/isp_freebsd.h>
33 #include <sys/unistd.h>
34 #include <sys/kthread.h>
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
39 #include <sys/devicestat.h>
40 #include <cam/cam_periph.h>
41 #include <cam/cam_xpt_periph.h>
43 #if __FreeBSD_version < 800002
44 #define THREAD_CREATE kthread_create
46 #define THREAD_CREATE kproc_create
/* Module registration and loader-tunable globals for the isp(4) driver. */
49 MODULE_VERSION(isp, 1);
50 MODULE_DEPEND(isp, cam, 1, 1, 1);
/* Set once the driver version banner has been printed. */
51 int isp_announced = 0;
/* Seconds of settle time after a fabric change before re-evaluating. */
52 int isp_fabric_hysteresis = 5;
53 int isp_loop_down_limit = 60; /* default loop down limit */
54 int isp_change_is_bad = 0; /* "changed" devices are bad */
55 int isp_quickboot_time = 7; /* don't wait more than N secs for loop up */
56 int isp_gone_device_time = 30; /* grace time before reporting device lost */
57 int isp_autoconfig = 1; /* automatically attach/detach devices */
/* Printable names indexed by ISP_ROLE_* bit pairs (none/target/initiator/both). */
58 static const char *roles[4] = {
59 "(none)", "Target", "Initiator", "Target/Initiator"
/* Shared format strings for port-departure and request-queue-overflow messages. */
61 static const char prom3[] = "Chan %d PortID 0x%06x Departed from Target %u because of %s";
62 static const char rqo[] = "%s: Request Queue Overflow\n";
/* Forward declarations for the file-local CAM glue, timers, and threads. */
64 static void isp_freeze_loopdown(ispsoftc_t *, int, char *);
65 static d_ioctl_t ispioctl;
66 static void isp_intr_enable(void *);
67 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *);
68 static void isp_poll(struct cam_sim *);
/* Per-command watchdog and the gone-device (gdt) / loop-down (ldt) timers
 * with their taskqueue-side handlers. */
69 static timeout_t isp_watchdog;
70 static timeout_t isp_gdt;
71 static task_fn_t isp_gdt_task;
72 static timeout_t isp_ldt;
73 static task_fn_t isp_ldt_task;
74 static void isp_kthread(void *);
75 static void isp_action(struct cam_sim *, union ccb *);
76 #ifdef ISP_INTERNAL_TARGET
77 static void isp_target_thread_pi(void *);
78 static void isp_target_thread_fc(void *);
80 static void isp_timer(void *);
/* Character-device switch backing /dev/isp* (ioctl entry point). */
82 static struct cdevsw isp_cdevsw = {
83 .d_version = D_VERSION,
/*
 * Attach one channel: allocate and register a CAM SIM, create the wildcard
 * path, hook AC_LOST_DEVICE async callbacks, and (per bus type) start the
 * FC kernel thread / loop-down machinery or the optional internal target
 * test thread.  Returns nonzero on failure (per callers in isp_attach).
 * NOTE(review): this is a gapped listing — error-unwind lines are missing.
 */
89 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
91 struct ccb_setasync csa;
93 struct cam_path *path;
96 * Construct our SIM entry.
98 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp, device_get_unit(isp->isp_dev), &isp->isp_osinfo.lock, isp->isp_maxcmds, isp->isp_maxcmds, devq);
105 if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
107 cam_sim_free(sim, FALSE);
111 if (xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
113 xpt_bus_deregister(cam_sim_path(sim));
115 cam_sim_free(sim, FALSE);
/* Register for lost-device async notifications on the wildcard path. */
118 xpt_setup_ccb(&csa.ccb_h, path, 5);
119 csa.ccb_h.func_code = XPT_SASYNC_CB;
120 csa.event_enable = AC_LOST_DEVICE;
121 csa.callback = isp_cam_async;
122 csa.callback_arg = sim;
123 xpt_action((union ccb *)&csa);
/* SPI (parallel SCSI) channel setup. */
126 struct isp_spi *spi = ISP_SPI_PC(isp, chan);
129 #ifdef ISP_INTERNAL_TARGET
130 ISP_SET_PC(isp, chan, proc_active, 1);
131 if (THREAD_CREATE(isp_target_thread_pi, spi, &spi->target_proc, 0, 0, "%s: isp_test_tgt%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
132 ISP_SET_PC(isp, chan, proc_active, 0);
133 isp_prt(isp, ISP_LOGERR, "cannot create test target thread");
/* Fibre Channel channel setup: loop-down/gone-device timers + tasks. */
137 fcparam *fcp = FCPARAM(isp, chan);
138 struct isp_fc *fc = ISP_FC_PC(isp, chan);
146 callout_init_mtx(&fc->ldt, &isp->isp_osinfo.lock, 0);
147 callout_init_mtx(&fc->gdt, &isp->isp_osinfo.lock, 0);
148 TASK_INIT(&fc->ltask, 1, isp_ldt_task, fc);
149 TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
152 * We start by being "loop down" if we have an initiator role
154 if (fcp->role & ISP_ROLE_INITIATOR) {
155 isp_freeze_loopdown(isp, chan, "isp_attach");
156 callout_reset(&fc->ldt, isp_quickboot_time * hz, isp_ldt, fc);
157 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Starting Initial Loop Down Timer @ %lu", (unsigned long) time_uptime);
/* If the FC helper thread cannot be created, unwind this channel. */
160 if (THREAD_CREATE(isp_kthread, fc, &fc->kproc, 0, 0, "%s: fc_thrd%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
161 xpt_free_path(fc->path);
163 if (callout_active(&fc->ldt)) {
164 callout_stop(&fc->ldt);
166 xpt_bus_deregister(cam_sim_path(fc->sim));
168 cam_sim_free(fc->sim, FALSE);
171 #ifdef ISP_INTERNAL_TARGET
172 ISP_SET_PC(isp, chan, proc_active, 1);
173 if (THREAD_CREATE(isp_target_thread_fc, fc, &fc->target_proc, 0, 0, "%s: isp_test_tgt%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
174 ISP_SET_PC(isp, chan, proc_active, 0);
175 isp_prt(isp, ISP_LOGERR, "cannot create test target thread");
/*
 * Top-level attach: establish the interrupt-enable config hook, allocate
 * the shared CAM device queue, attach every channel, start the periodic
 * timer, and create the control character device.  On any channel failure
 * the already-attached channels are unwound in reverse order.
 */
183 isp_attach(ispsoftc_t *isp)
185 const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
186 int du = device_get_unit(isp->isp_dev);
189 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
190 isp->isp_osinfo.ehook.ich_arg = isp;
192 * Haha. Set this first, because if we're loaded as a module isp_intr_enable
193 * will be called right away, which will clear isp_osinfo.ehook_active,
194 * which would be unwise to then set again later.
196 isp->isp_osinfo.ehook_active = 1;
197 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
198 isp_prt(isp, ISP_LOGERR, "could not establish interrupt enable hook");
203 * Create the device queue for our SIM(s).
205 isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
206 if (isp->isp_osinfo.devq == NULL) {
207 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
/* Attach each channel; a nonzero return means unwind everything below. */
211 for (chan = 0; chan < isp->isp_nchan; chan++) {
212 if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
/* Start the one-second housekeeping timer. */
217 callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_osinfo.lock, 0);
218 callout_reset(&isp->isp_osinfo.tmo, hz, isp_timer, isp);
219 isp->isp_osinfo.timer_active = 1;
/* Control device for ispioctl(); failure here is non-fatal upstream. */
221 isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
222 if (isp->isp_osinfo.cdev) {
223 isp->isp_osinfo.cdev->si_drv1 = isp;
/* Failure path: deregister/free each successfully attached channel. */
228 while (--chan >= 0) {
230 struct cam_path *path;
232 sim = ISP_FC_PC(isp, chan)->sim;
233 path = ISP_FC_PC(isp, chan)->path;
235 sim = ISP_SPI_PC(isp, chan)->sim;
236 path = ISP_SPI_PC(isp, chan)->path;
240 xpt_bus_deregister(cam_sim_path(sim));
242 cam_sim_free(sim, FALSE);
244 if (isp->isp_osinfo.ehook_active) {
245 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
246 isp->isp_osinfo.ehook_active = 0;
248 if (isp->isp_osinfo.cdev) {
249 destroy_dev(isp->isp_osinfo.cdev);
250 isp->isp_osinfo.cdev = NULL;
252 cam_simq_free(isp->isp_osinfo.devq);
253 isp->isp_osinfo.devq = NULL;
/*
 * Detach: refuse if any SIM is still referenced by peripherals, then stop
 * the timer, unhook async callbacks, deregister/free each channel's SIM,
 * and tear down the cdev, intrhook, and device queue.
 */
258 isp_detach(ispsoftc_t *isp)
261 struct cam_path *path;
262 struct ccb_setasync csa;
/* First pass: bail out (busy) if any SIM still has external references. */
266 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) {
268 sim = ISP_FC_PC(isp, chan)->sim;
269 path = ISP_FC_PC(isp, chan)->path;
271 sim = ISP_SPI_PC(isp, chan)->sim;
272 path = ISP_SPI_PC(isp, chan)->path;
274 if (sim->refcount > 2) {
279 if (isp->isp_osinfo.timer_active) {
280 callout_stop(&isp->isp_osinfo.tmo);
281 isp->isp_osinfo.timer_active = 0;
/* Second pass: disable async callbacks and release each channel. */
283 for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1) {
285 sim = ISP_FC_PC(isp, chan)->sim;
286 path = ISP_FC_PC(isp, chan)->path;
288 sim = ISP_SPI_PC(isp, chan)->sim;
289 path = ISP_SPI_PC(isp, chan)->path;
291 xpt_setup_ccb(&csa.ccb_h, path, 5);
292 csa.ccb_h.func_code = XPT_SASYNC_CB;
293 csa.event_enable = 0;
294 csa.callback = isp_cam_async;
295 csa.callback_arg = sim;
296 xpt_action((union ccb *)&csa);
298 xpt_bus_deregister(cam_sim_path(sim));
299 cam_sim_free(sim, FALSE);
302 if (isp->isp_osinfo.cdev) {
303 destroy_dev(isp->isp_osinfo.cdev);
304 isp->isp_osinfo.cdev = NULL;
306 if (isp->isp_osinfo.ehook_active) {
307 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
308 isp->isp_osinfo.ehook_active = 0;
310 if (isp->isp_osinfo.devq != NULL) {
311 cam_simq_free(isp->isp_osinfo.devq);
312 isp->isp_osinfo.devq = NULL;
/*
 * Freeze the channel's SIM queue because the FC loop is down.  Only the
 * first caller actually freezes; later callers just OR in the flag so the
 * matching unfreeze releases exactly once.  'msg' labels the caller in
 * debug output.
 */
318 isp_freeze_loopdown(ispsoftc_t *isp, int chan, char *msg)
321 struct isp_fc *fc = ISP_FC_PC(isp, chan);
322 if (fc->simqfrozen == 0) {
323 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown) chan %d", msg, chan);
324 fc->simqfrozen = SIMQFRZ_LOOPDOWN;
325 xpt_freeze_simq(fc->sim, 1);
327 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown) chan %d", msg, chan);
328 fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
/*
 * Clear the loop-down freeze reason; release the SIM queue only when no
 * other freeze reasons remain (simqfrozen drops to zero).
 */
334 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
337 struct isp_fc *fc = ISP_FC_PC(isp, chan);
338 int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
339 fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
340 if (wasfrozen && fc->simqfrozen == 0) {
341 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d releasing simq", __func__, chan);
342 xpt_release_simq(fc->sim, 1);
/*
 * ioctl entry point for the isp control device.  Handles debug-level
 * get/set, role query/change, loop reset/LIP, port-database queries,
 * statistics get/clear, HBA info, and FC task-management commands.
 * Default return is ENOTTY for unknown commands.
 * NOTE(review): gapped listing — the switch/case scaffolding and several
 * error paths are not visible here.
 */
349 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
352 int nr, chan, retval = ENOTTY;
/* ISP_SDBLEV: swap in the new debug level, return the old one. */
359 int olddblev = isp->isp_dblev;
360 isp->isp_dblev = *(int *)addr;
361 *(int *)addr = olddblev;
/* ISP_GETROLE: validate channel, return current role (FC or SPI). */
367 if (chan < 0 || chan >= isp->isp_nchan) {
372 *(int *)addr = FCPARAM(isp, chan)->role;
374 *(int *)addr = SDPARAM(isp, chan)->role;
/* ISP_SETROLE: validate channel and requested role bits. */
381 if (chan < 0 || chan >= isp->isp_nchan) {
386 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
392 * We don't really support dual role at present on FC cards.
394 * We should, but a bunch of things are currently broken,
397 if (nr == ISP_ROLE_BOTH) {
398 isp_prt(isp, ISP_LOGERR, "cannot support dual role at present");
402 *(int *)addr = FCPARAM(isp, chan)->role;
403 #ifdef ISP_INTERNAL_TARGET
405 retval = isp_fc_change_role(isp, chan, nr);
408 FCPARAM(isp, chan)->role = nr;
411 *(int *)addr = SDPARAM(isp, chan)->role;
412 SDPARAM(isp, chan)->role = nr;
/* ISP_RESETHBA (presumably): drop tracked WWNs before reinit. */
419 #ifdef ISP_TARGET_MODE
420 isp_del_all_wwn_entries(isp, ISP_NOCHAN);
/* ISP_FC_RESCAN: re-run the FC state machine (5 second budget). */
430 if (chan < 0 || chan >= isp->isp_nchan) {
435 if (isp_fc_runstate(isp, chan, 5 * 1000000)) {
/* ISP_FC_LIP: issue a Loop Initialization Primitive. */
447 if (chan < 0 || chan >= isp->isp_nchan) {
452 if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
460 case ISP_FC_GETDINFO:
462 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
468 if (ifc->loopid >= MAX_FC_TARG) {
/* Copy out the port-database entry if it is valid or in target mode. */
472 lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
473 if (lp->state == FC_PORTDB_STATE_VALID || lp->target_mode) {
474 ifc->role = lp->roles;
475 ifc->loopid = lp->handle;
476 ifc->portid = lp->portid;
477 ifc->node_wwn = lp->node_wwn;
478 ifc->port_wwn = lp->port_wwn;
/* ISP_GET_STATS: snapshot the interrupt/completion counters. */
487 isp_stats_t *sp = (isp_stats_t *) addr;
489 ISP_MEMZERO(sp, sizeof (*sp));
490 sp->isp_stat_version = ISP_STATS_VERSION;
491 sp->isp_type = isp->isp_type;
492 sp->isp_revision = isp->isp_revision;
494 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
495 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
496 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
497 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
498 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
499 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
500 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
501 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
/* ISP_CLR_STATS: zero the counters. */
509 isp->isp_intbogus = 0;
510 isp->isp_intmboxc = 0;
511 isp->isp_intoasync = 0;
512 isp->isp_rsltccmplt = 0;
513 isp->isp_fphccmplt = 0;
514 isp->isp_rscchiwater = 0;
515 isp->isp_fpcchiwater = 0;
519 case ISP_FC_GETHINFO:
521 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
522 int chan = hba->fc_channel;
524 if (chan < 0 || chan >= isp->isp_nchan) {
528 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
529 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
530 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
531 hba->fc_nchannels = isp->isp_nchan;
/* FC branch fills real topology/WWN data... */
533 hba->fc_nports = MAX_FC_TARG;
534 hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
535 hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
536 hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
537 hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
538 hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
539 hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
540 hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
/* ...while the SPI branch reports zeros. */
542 hba->fc_nports = MAX_TARGETS;
544 hba->fc_topology = 0;
545 hba->nvram_node_wwn = 0ull;
546 hba->nvram_port_wwn = 0ull;
547 hba->active_node_wwn = 0ull;
548 hba->active_port_wwn = 0ull;
/* ISP_TSK_MGMT: run a SCSI task-management function against a loopid. */
556 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
565 if (chan < 0 || chan >= isp->isp_nchan) {
570 needmarker = retval = 0;
571 loopid = fct->loopid;
/* 24XX path: build an IOCB-based TMF in the scratch area. */
574 uint8_t local[QENTRY_LEN];
576 isp24xx_statusreq_t *sp;
577 fcparam *fcp = FCPARAM(isp, chan);
/* Resolve the caller's loopid to a port-database entry. */
581 for (i = 0; i < MAX_FC_TARG; i++) {
582 lp = &fcp->portdb[i];
583 if (lp->handle == loopid) {
587 if (i == MAX_FC_TARG) {
592 /* XXX VALIDATE LP XXX */
593 tmf = (isp24xx_tmf_t *) local;
594 ISP_MEMZERO(tmf, QENTRY_LEN);
595 tmf->tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
596 tmf->tmf_header.rqs_entry_count = 1;
597 tmf->tmf_nphdl = lp->handle;
599 tmf->tmf_timeout = 2;
600 tmf->tmf_tidlo = lp->portid;
601 tmf->tmf_tidhi = lp->portid >> 16;
602 tmf->tmf_vpidx = ISP_GET_VPIDX(isp, chan);
/* Encode the LUN in SAM flat-addressing form (byte 0 carries 0x40 for LUNs >= 256). */
603 tmf->tmf_lun[1] = fct->lun & 0xff;
604 if (fct->lun >= 256) {
605 tmf->tmf_lun[0] = 0x40 | (fct->lun >> 8);
607 switch (fct->action) {
609 tmf->tmf_flags = ISP24XX_TMF_CLEAR_ACA;
611 case IPT_TARGET_RESET:
612 tmf->tmf_flags = ISP24XX_TMF_TARGET_RESET;
616 tmf->tmf_flags = ISP24XX_TMF_LUN_RESET;
619 case IPT_CLEAR_TASK_SET:
620 tmf->tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
623 case IPT_ABORT_TASK_SET:
624 tmf->tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
/* Execute the IOCB via the mailbox EXEC_COMMAND_IOCB_A64 interface,
 * pointing the chip at the DMA scratch area. */
635 MBSINIT(&mbs, MBOX_EXEC_COMMAND_IOCB_A64, MBLOGALL, 5000000);
636 mbs.param[1] = QENTRY_LEN;
637 mbs.param[2] = DMA_WD1(fcp->isp_scdma);
638 mbs.param[3] = DMA_WD0(fcp->isp_scdma);
639 mbs.param[6] = DMA_WD3(fcp->isp_scdma);
640 mbs.param[7] = DMA_WD2(fcp->isp_scdma);
642 if (FC_SCRATCH_ACQUIRE(isp, chan)) {
647 isp_put_24xx_tmf(isp, tmf, fcp->isp_scratch);
648 MEMORYBARRIER(isp, SYNC_SFORDEV, 0, QENTRY_LEN, chan);
649 sp = (isp24xx_statusreq_t *) local;
650 sp->req_completion_status = 1;
651 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
652 MEMORYBARRIER(isp, SYNC_SFORCPU, QENTRY_LEN, QENTRY_LEN, chan);
653 isp_get_24xx_response(isp, &((isp24xx_statusreq_t *)fcp->isp_scratch)[1], sp);
/* NOTE(review): scratch is released here and again in the error branch
 * below — looks like a possible double FC_SCRATCH_RELEASE; verify against
 * the complete source (intervening lines are missing from this listing). */
654 FC_SCRATCH_RELEASE(isp, chan);
655 if (retval || sp->req_completion_status != 0) {
656 FC_SCRATCH_RELEASE(isp, chan);
/* Pre-24XX path: use the dedicated task-management mailbox commands. */
665 MBSINIT(&mbs, 0, MBLOGALL, 0);
666 if (ISP_CAP_2KLOGIN(isp) == 0) {
669 switch (fct->action) {
671 mbs.param[0] = MBOX_CLEAR_ACA;
672 mbs.param[1] = loopid;
673 mbs.param[2] = fct->lun;
675 case IPT_TARGET_RESET:
676 mbs.param[0] = MBOX_TARGET_RESET;
677 mbs.param[1] = loopid;
681 mbs.param[0] = MBOX_LUN_RESET;
682 mbs.param[1] = loopid;
683 mbs.param[2] = fct->lun;
686 case IPT_CLEAR_TASK_SET:
687 mbs.param[0] = MBOX_CLEAR_TASK_SET;
688 mbs.param[1] = loopid;
689 mbs.param[2] = fct->lun;
692 case IPT_ABORT_TASK_SET:
693 mbs.param[0] = MBOX_ABORT_TASK_SET;
694 mbs.param[1] = loopid;
695 mbs.param[2] = fct->lun;
/* A marker must follow a reset-type TMF on these chips. */
704 FCPARAM(isp, chan)->sendmarker = 1;
706 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
/*
 * config_intrhook callback: enable chip interrupts once any channel has a
 * non-NONE role, then release the hook so boot can proceed.
 */
722 isp_intr_enable(void *arg)
725 ispsoftc_t *isp = arg;
727 for (chan = 0; chan < isp->isp_nchan; chan++) {
729 if (FCPARAM(isp, chan)->role != ISP_ROLE_NONE) {
730 ISP_ENABLE_INTS(isp);
734 if (SDPARAM(isp, chan)->role != ISP_ROLE_NONE) {
735 ISP_ENABLE_INTS(isp);
740 isp->isp_osinfo.ehook_active = 0;
742 /* Release our hook so that the boot can continue. */
743 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
/* Per-command (pcmd) free-list management: a singly linked pool hung off
 * isp_osinfo.pcmd_free, attached to a CCB via ISP_PCMD(). */
750 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
751 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
/* Pop a pcmd from the free list onto the CCB; fails if the pool is empty. */
753 static ISP_INLINE int
754 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
756 ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
757 if (ISP_PCMD(ccb) == NULL) {
760 isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
/* Push the CCB's pcmd back onto the free list and clear the link. */
764 static ISP_INLINE void
765 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
767 ((struct isp_pcmd *)ISP_PCMD(ccb))->next = isp->isp_osinfo.pcmd_free;
768 isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
769 ISP_PCMD(ccb) = NULL;
772 * Put the target mode functions here, because some are inlines
775 #ifdef ISP_TARGET_MODE
/* Serialization of lun enable/disable operations (tmbusy/tmwanted). */
776 static ISP_INLINE void isp_tmlock(ispsoftc_t *, const char *);
777 static ISP_INLINE void isp_tmunlk(ispsoftc_t *);
/* Per-bus lun-state (tstate_t) hash lookup/creation/teardown. */
778 static ISP_INLINE int is_any_lun_enabled(ispsoftc_t *, int);
779 static ISP_INLINE int is_lun_enabled(ispsoftc_t *, int, lun_id_t);
780 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
781 static ISP_INLINE tstate_t *get_lun_statep_from_tag(ispsoftc_t *, int, uint32_t);
782 static ISP_INLINE void rls_lun_statep(ispsoftc_t *, tstate_t *);
/* ATIO/INOT private-data pool management. */
783 static ISP_INLINE inot_private_data_t *get_ntp_from_tagdata(ispsoftc_t *, uint32_t, uint32_t, tstate_t **);
784 static ISP_INLINE atio_private_data_t *isp_get_atpd(ispsoftc_t *, tstate_t *, uint32_t);
785 static ISP_INLINE void isp_put_atpd(ispsoftc_t *, tstate_t *, atio_private_data_t *);
786 static ISP_INLINE inot_private_data_t *isp_get_ntpd(ispsoftc_t *, tstate_t *);
787 static ISP_INLINE inot_private_data_t *isp_find_ntpd(ispsoftc_t *, tstate_t *, uint32_t, uint32_t);
788 static ISP_INLINE void isp_put_ntpd(ispsoftc_t *, tstate_t *, inot_private_data_t *);
789 static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **);
790 static void destroy_lun_state(ispsoftc_t *, tstate_t *);
/* Lun enable/disable entry points and deferred-enable support. */
791 static void isp_enable_lun(ispsoftc_t *, union ccb *);
792 static cam_status isp_enable_deferred_luns(ispsoftc_t *, int);
793 static cam_status isp_enable_deferred(ispsoftc_t *, int, lun_id_t);
794 static void isp_disable_lun(ispsoftc_t *, union ccb *);
795 static int isp_enable_target_mode(ispsoftc_t *, int);
796 static int isp_disable_target_mode(ispsoftc_t *, int);
797 static void isp_ledone(ispsoftc_t *, lun_entry_t *);
/* Target-mode datapath: ATIO/CTIO/notify handling per chip generation. */
798 static timeout_t isp_refire_putback_atio;
799 static void isp_complete_ctio(union ccb *);
800 static void isp_target_putback_atio(union ccb *);
801 static void isp_target_start_ctio(ispsoftc_t *, union ccb *);
802 static void isp_handle_platform_atio(ispsoftc_t *, at_entry_t *);
803 static void isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *);
804 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
805 static void isp_handle_platform_ctio(ispsoftc_t *, void *);
806 static void isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *);
807 static void isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *);
808 static void isp_handle_platform_notify_24xx(ispsoftc_t *, in_fcentry_24xx_t *);
809 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *);
810 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
811 static void isp_target_mark_aborted(ispsoftc_t *, union ccb *);
812 static void isp_target_mark_aborted_early(ispsoftc_t *, tstate_t *, uint32_t);
/*
 * Acquire the target-mode mutex-like flag; sleeps on the softc until
 * tmbusy clears.  Caller must hold isp_lock (mtx_sleep drops/retakes it).
 */
814 static ISP_INLINE void
815 isp_tmlock(ispsoftc_t *isp, const char *msg)
817 while (isp->isp_osinfo.tmbusy) {
818 isp->isp_osinfo.tmwanted = 1;
819 mtx_sleep(isp, &isp->isp_lock, PRIBIO, msg, 0);
821 isp->isp_osinfo.tmbusy = 1;
/* Release the flag and wake any waiter (wakeup call is in a missing line). */
824 static ISP_INLINE void
825 isp_tmunlk(ispsoftc_t *isp)
827 isp->isp_osinfo.tmbusy = 0;
828 if (isp->isp_osinfo.tmwanted) {
829 isp->isp_osinfo.tmwanted = 0;
/* Return nonzero if any lun-state exists in any hash bucket for this bus. */
834 static ISP_INLINE int
835 is_any_lun_enabled(ispsoftc_t *isp, int bus)
840 for (i = 0; i < LUN_HASH_SIZE; i++) {
841 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
842 if (SLIST_FIRST(lhp))
/* Return nonzero if a lun-state exists for this specific (bus, lun). */
848 static ISP_INLINE int
849 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun)
854 ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp);
855 SLIST_FOREACH(tptr, lhp, next) {
856 if (xpt_path_lun_id(tptr->owner) == lun) {
/* Debug helper: print ATIO/INOT counts for every lun-state on a bus. */
864 dump_tstates(ispsoftc_t *isp, int bus)
868 tstate_t *tptr = NULL;
870 if (bus >= isp->isp_nchan) {
873 for (i = 0; i < LUN_HASH_SIZE; i++) {
874 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
876 SLIST_FOREACH(tptr, lhp, next) {
877 xpt_print(tptr->owner, "[%d, %d] atio_cnt=%d inot_cnt=%d\n", i, j, tptr->atio_count, tptr->inot_count);
/*
 * Find the lun-state for (bus, lun) by walking every hash bucket.
 * NOTE(review): the hold/return lines are missing from this listing;
 * presumably the found tptr is held before return — confirm in full source.
 */
883 static ISP_INLINE tstate_t *
884 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
886 tstate_t *tptr = NULL;
890 if (bus < isp->isp_nchan) {
891 for (i = 0; i < LUN_HASH_SIZE; i++) {
892 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
893 SLIST_FOREACH(tptr, lhp, next) {
894 if (xpt_path_lun_id(tptr->owner) == lun) {
/*
 * Find the lun-state owning an active ATIO with the given tag value.
 */
904 static ISP_INLINE tstate_t *
905 get_lun_statep_from_tag(ispsoftc_t *isp, int bus, uint32_t tagval)
907 tstate_t *tptr = NULL;
908 atio_private_data_t *atp;
912 if (bus < isp->isp_nchan && tagval != 0) {
913 for (i = 0; i < LUN_HASH_SIZE; i++) {
914 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
915 SLIST_FOREACH(tptr, lhp, next) {
916 atp = isp_get_atpd(isp, tptr, tagval);
917 if (atp && atp->tag == tagval) {
/*
 * Search all buses/luns for an immediate-notify matching (tag_id, seq_id);
 * on success *rslt receives the owning lun-state.
 */
927 static ISP_INLINE inot_private_data_t *
928 get_ntp_from_tagdata(ispsoftc_t *isp, uint32_t tag_id, uint32_t seq_id, tstate_t **rslt)
930 inot_private_data_t *ntp;
935 for (bus = 0; bus < isp->isp_nchan; bus++) {
936 for (i = 0; i < LUN_HASH_SIZE; i++) {
937 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
938 SLIST_FOREACH(tptr, lhp, next) {
939 ntp = isp_find_ntpd(isp, tptr, tag_id, seq_id);
/* Drop a hold on a lun-state; asserts the hold is actually present. */
951 static ISP_INLINE void
952 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr)
954 KASSERT((tptr->hold), ("tptr not held"));
/*
 * Re-dispatch ATIOs that were previously shelved for lack of resources:
 * detach each lun-state's restart queue and replay every entry through
 * the appropriate ATIO handler (2400-series vs older FC).
 */
959 isp_tmcmd_restart(ispsoftc_t *isp)
961 inot_private_data_t *ntp;
966 for (bus = 0; bus < isp->isp_nchan; bus++) {
967 for (i = 0; i < LUN_HASH_SIZE; i++) {
968 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
969 SLIST_FOREACH(tptr, lhp, next) {
/* Take ownership of the queue so handlers can requeue freely. */
970 inot_private_data_t *restart_queue = tptr->restart_queue;
971 tptr->restart_queue = NULL;
972 while (restart_queue) {
974 restart_queue = ntp->rd.nt.nt_hba;
976 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid);
977 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data);
979 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid);
980 isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data);
982 isp_put_ntpd(isp, tptr, ntp);
/* If a handler requeued entries, splice the remaining old queue
 * behind them so nothing is lost or reordered incorrectly. */
983 if (tptr->restart_queue && restart_queue != NULL) {
984 ntp = tptr->restart_queue;
985 tptr->restart_queue = restart_queue;
986 while (restart_queue->rd.nt.nt_hba) {
987 restart_queue = restart_queue->rd.nt.nt_hba;
989 restart_queue->rd.nt.nt_hba = ntp;
/*
 * With tag == 0 (pool gap in listing suggests): pop a free ATIO private-
 * data slot; otherwise scan the fixed pool for the entry with that tag.
 */
998 static ISP_INLINE atio_private_data_t *
999 isp_get_atpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag)
1001 atio_private_data_t *atp;
1006 tptr->atfree = atp->next;
1010 for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) {
1011 if (atp->tag == tag) {
/* Return an ATIO slot to the free list. */
1018 static ISP_INLINE void
1019 isp_put_atpd(ispsoftc_t *isp, tstate_t *tptr, atio_private_data_t *atp)
1023 atp->next = tptr->atfree;
/* Debug helper: dump every in-use ATIO slot with its transfer state. */
1028 isp_dump_atpd(ispsoftc_t *isp, tstate_t *tptr)
1030 atio_private_data_t *atp;
1031 const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
1033 for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) {
1034 if (atp->tag == 0) {
1037 xpt_print(tptr->owner, "ATP: [0x%x] origdlen %u bytes_xfrd %u last_xfr %u lun %u nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s\n",
1038 atp->tag, atp->orig_datalen, atp->bytes_xfered, atp->last_xframt, atp->lun, atp->nphdl, atp->sid, atp->portid, atp->oxid, states[atp->state & 0x7]);
/* Pop a free immediate-notify slot from the lun-state's pool. */
1043 static ISP_INLINE inot_private_data_t *
1044 isp_get_ntpd(ispsoftc_t *isp, tstate_t *tptr)
1046 inot_private_data_t *ntp;
1049 tptr->ntfree = ntp->next;
/* Linear scan of the notify pool for a (tag_id, seq_id) match. */
1054 static ISP_INLINE inot_private_data_t *
1055 isp_find_ntpd(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id, uint32_t seq_id)
1057 inot_private_data_t *ntp;
1058 for (ntp = tptr->ntpool; ntp < &tptr->ntpool[ATPDPSIZE]; ntp++) {
1059 if (ntp->rd.tag_id == tag_id && ntp->rd.seq_id == seq_id) {
/* Clear identifying fields and return the slot to the free list. */
1066 static ISP_INLINE void
1067 isp_put_ntpd(ispsoftc_t *isp, tstate_t *tptr, inot_private_data_t *ntp)
1069 ntp->rd.tag_id = ntp->rd.seq_id = 0;
1070 ntp->next = tptr->ntfree;
/*
 * Allocate and register a lun-state for (bus, path): validates the lun,
 * rejects duplicates, creates an owning CAM path, initializes the ATIO and
 * notify free pools, and inserts the state into the bus's lun hash.
 * Returns CAM status; *rslt receives the new state on success.
 */
1075 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt)
1083 lun = xpt_path_lun_id(path);
1084 if (lun != CAM_LUN_WILDCARD) {
1085 if (lun >= ISP_MAX_LUNS(isp)) {
1086 return (CAM_LUN_INVALID);
1089 if (is_lun_enabled(isp, bus, lun)) {
1090 return (CAM_LUN_ALRDY_ENA);
1092 tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
1094 return (CAM_RESRC_UNAVAIL);
1096 status = xpt_create_path(&tptr->owner, NULL, xpt_path_path_id(path), xpt_path_target_id(path), lun);
1097 if (status != CAM_REQ_CMP) {
1098 free(tptr, M_DEVBUF);
1101 SLIST_INIT(&tptr->atios);
1102 SLIST_INIT(&tptr->inots);
/* Chain the fixed-size pools into singly linked free lists. */
1103 for (i = 0; i < ATPDPSIZE-1; i++) {
1104 tptr->atpool[i].next = &tptr->atpool[i+1];
1105 tptr->ntpool[i].next = &tptr->ntpool[i+1];
1107 tptr->atfree = tptr->atpool;
1108 tptr->ntfree = tptr->ntpool;
1110 ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(xpt_path_lun_id(tptr->owner))], lhp);
1111 SLIST_INSERT_HEAD(lhp, tptr, next);
1113 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
1114 return (CAM_REQ_CMP);
/*
 * Remove a lun-state from its hash bucket and free it.  The caller must
 * hold exactly one reference (asserted).
 */
1117 static ISP_INLINE void
1118 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr)
1122 KASSERT((tptr->hold != 0), ("tptr is not held"));
1123 KASSERT((tptr->hold == 1), ("tptr still held (%d)", tptr->hold));
1124 ISP_GET_PC_ADDR(isp, cam_sim_bus(xpt_path_sim(tptr->owner)), lun_hash[LUN_HASH_FUNC(xpt_path_lun_id(tptr->owner))], lhp);
1125 SLIST_REMOVE(lhp, tptr, tstate, next);
1126 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "destroyed tstate\n");
1127 xpt_free_path(tptr->owner);
1128 free(tptr, M_DEVBUF);
/*
 * XPT_EN_LUN handler (enable side).  Validates the target/lun combination,
 * checks firmware target-mode + SCCLUN capability, creates the lun-state,
 * turns on target mode for the bus if our role calls for it, and either
 * completes the enable now or defers it until target mode comes up.
 * Completion status is returned in ccb->ccb_h.status.
 */
1135 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
1137 tstate_t *tptr = NULL;
1138 int bus, tm_enabled, target_role;
1144 * We only support either a wildcard target/lun or a target ID of zero and a non-wildcard lun
1146 bus = XS_CHANNEL(ccb);
1147 target = ccb->ccb_h.target_id;
1148 lun = ccb->ccb_h.target_lun;
1149 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "enabling lun %u\n", lun);
1150 if (target != CAM_TARGET_WILDCARD && target != 0) {
1151 ccb->ccb_h.status = CAM_TID_INVALID;
1155 if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) {
1156 ccb->ccb_h.status = CAM_LUN_INVALID;
1161 if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
1162 ccb->ccb_h.status = CAM_LUN_INVALID;
1166 if (isp->isp_dblev & ISP_LOGTDEBUG0) {
1167 xpt_print(ccb->ccb_h.path, "enabling lun 0x%x on channel %d\n", lun, bus);
1171 * Wait until we're not busy with the lun enables subsystem
1173 isp_tmlock(isp, "isp_enable_lun");
1176 * This is as a good a place as any to check f/w capabilities.
1180 if (ISP_CAP_TMODE(isp) == 0) {
1181 xpt_print(ccb->ccb_h.path, "firmware does not support target mode\n");
1182 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1186 * We *could* handle non-SCCLUN f/w, but we'd have to
1187 * dork with our already fragile enable/disable code.
1189 if (ISP_CAP_SCCFW(isp) == 0) {
1190 xpt_print(ccb->ccb_h.path, "firmware not SCCLUN capable\n");
1191 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1195 target_role = (FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0;
1198 target_role = (SDPARAM(isp, bus)->role & ISP_ROLE_TARGET) != 0;
1202 * Create the state pointer.
1203 * It should not already exist.
1205 tptr = get_lun_statep(isp, bus, lun);
1207 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
1210 ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
1211 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1216 * We have a tricky maneuver to perform here.
1218 * If target mode isn't already enabled here,
1219 * *and* our current role includes target mode,
1220 * we enable target mode here.
1223 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled);
1224 if (tm_enabled == 0 && target_role != 0) {
1225 if (isp_enable_target_mode(isp, bus)) {
1226 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1227 destroy_lun_state(isp, tptr);
1235 * Now check to see whether this bus is in target mode already.
1237 * If not, a later role change into target mode will finish the job.
1239 if (tm_enabled == 0) {
1240 ISP_SET_PC(isp, bus, tm_enable_defer, 1);
1241 ccb->ccb_h.status = CAM_REQ_CMP;
1242 xpt_print(ccb->ccb_h.path, "Target Mode not enabled yet- lun enable deferred\n");
/* Target mode is up: complete the lun enable now. */
1249 ccb->ccb_h.status = isp_enable_deferred(isp, bus, lun);
1252 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1254 destroy_lun_state(isp, tptr);
1261 rls_lun_statep(isp, tptr);
1265 * And we're outta here....
/*
 * Called after a role change into target mode: if enables were deferred,
 * bring up target mode and run isp_enable_deferred() for every lun-state
 * that is not yet enabled.  Returns CAM_REQ_CMP on success.
 */
1272 isp_enable_deferred_luns(ispsoftc_t *isp, int bus)
1274 tstate_t *tptr = NULL;
/* Nothing to do if target mode is already on or nothing was deferred. */
1278 ISP_GET_PC(isp, bus, tm_enabled, i);
1280 return (CAM_REQ_CMP);
1282 ISP_GET_PC(isp, bus, tm_enable_defer, i);
1284 return (CAM_REQ_CMP);
1287 * If this succeeds, it will set tm_enable
1289 if (isp_enable_target_mode(isp, bus)) {
1290 return (CAM_REQ_CMP_ERR);
1292 isp_tmlock(isp, "isp_enable_deferred_luns");
1293 for (n = i = 0; i < LUN_HASH_SIZE; i++) {
1294 ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
1295 SLIST_FOREACH(tptr, lhp, next) {
1297 if (tptr->enabled == 0) {
1298 if (isp_enable_deferred(isp, bus, xpt_path_lun_id(tptr->owner)) == 0) {
1310 return (CAM_REQ_CMP_ERR);
1312 ISP_SET_PC(isp, bus, tm_enable_defer, 0);
1313 return (CAM_REQ_CMP);
/*
 * Actually enable one lun for target mode.  24XX (and FC buses that have
 * already enabled a lun) need no firmware command; otherwise issue
 * RQSTYPE_ENABLE_LUN and sleep until isp_ledone() posts the result via
 * isp_osinfo.rptr.  Returns a CAM status.
 */
1317 isp_enable_deferred(ispsoftc_t *isp, int bus, lun_id_t lun)
1320 int luns_already_enabled = ISP_FC_PC(isp, bus)->tm_luns_enabled;
1322 isp_prt(isp, ISP_LOGTINFO, "%s: bus %d lun %u", __func__, bus, lun);
1323 if (IS_24XX(isp) || (IS_FC(isp) && luns_already_enabled)) {
1324 status = CAM_REQ_CMP;
1326 int cmd_cnt, not_cnt;
1329 cmd_cnt = DFLT_CMND_CNT;
1330 not_cnt = DFLT_INOT_CNT;
1335 status = CAM_REQ_INPROG;
/* isp_ledone() writes the final status through rptr and wakes us. */
1336 isp->isp_osinfo.rptr = &status;
1337 if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, DFLT_CMND_CNT, DFLT_INOT_CNT)) {
1338 status = CAM_RESRC_UNAVAIL;
1340 mtx_sleep(&status, &isp->isp_lock, PRIBIO, "isp_enable_deferred", 0);
1342 isp->isp_osinfo.rptr = NULL;
1344 if (status == CAM_REQ_CMP) {
1345 ISP_SET_PC(isp, bus, tm_luns_enabled, 1);
1346 isp_prt(isp, ISP_LOGCONFIG|ISP_LOGTINFO, "bus %d lun %u now enabled for target mode", bus, lun);
/*
 * XPT_EN_LUN handler (disable side).  Mirrors isp_enable_lun's validation,
 * tears down the firmware lun (pre-24XX), and when the last lun on the bus
 * goes away, drops tracked WWNs and disables target mode entirely.
 */
1352 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
1354 tstate_t *tptr = NULL;
1360 bus = XS_CHANNEL(ccb);
1361 target = ccb->ccb_h.target_id;
1362 lun = ccb->ccb_h.target_lun;
1363 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path, "disabling lun %u\n", lun);
1364 if (target != CAM_TARGET_WILDCARD && target != 0) {
1365 ccb->ccb_h.status = CAM_TID_INVALID;
1370 if (target == CAM_TARGET_WILDCARD && lun != CAM_LUN_WILDCARD) {
1371 ccb->ccb_h.status = CAM_LUN_INVALID;
1376 if (target != CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
1377 ccb->ccb_h.status = CAM_LUN_INVALID;
1383 * See if we're busy disabling a lun now.
1385 isp_tmlock(isp, "isp_disable_lun");
1386 status = CAM_REQ_INPROG;
1389 * Find the state pointer.
1391 if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
1392 status = CAM_PATH_INVALID;
1397 * If we're a 24XX card, we're done.
1400 status = CAM_REQ_CMP;
1405 * For SCC FW, we only deal with lun zero.
/* Ask firmware to tear the lun down (counts of 0/0 disable it), then
 * wait for isp_ledone() to post completion.
 * NOTE(review): this sleeps on 'ccb' while the enable path sleeps on
 * '&status' — verify the wakeup channel in the full source. */
1410 isp->isp_osinfo.rptr = &status;
1411 if (isp_lun_cmd(isp, RQSTYPE_ENABLE_LUN, bus, lun, 0, 0)) {
1412 status = CAM_RESRC_UNAVAIL;
1414 mtx_sleep(ccb, &isp->isp_lock, PRIBIO, "isp_disable_lun", 0);
1416 isp->isp_osinfo.rptr = NULL;
1418 if (status == CAM_REQ_CMP) {
1421 * If we have no more luns enabled for this bus, delete all tracked wwns for it (if we are FC)
1422 * and disable target mode.
1424 if (is_any_lun_enabled(isp, bus) == 0) {
1425 isp_del_all_wwn_entries(isp, bus);
1426 if (isp_disable_target_mode(isp, bus)) {
1427 status = CAM_REQ_CMP_ERR;
1431 ccb->ccb_h.status = status;
1432 if (status == CAM_REQ_CMP) {
1433 xpt_print(ccb->ccb_h.path, "lun now disabled for target mode\n");
1434 destroy_lun_state(isp, tptr);
1437 rls_lun_statep(isp, tptr);
/*
 * Turn on the Target Role for channel 'bus' by running the
 * ENABLE TARGET MODE mailbox command, then record the new role state
 * in the per-channel soft state.  A no-op if the role is already on.
 */
1444 isp_enable_target_mode(ispsoftc_t *isp, int bus)
1448 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled);
/* Already enabled -- nothing to do. */
1449 if (tm_enabled != 0) {
1454 MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0);
1455 mbs.param[0] = MBOX_ENABLE_TARGET_MODE;
/* Request both the target role and command queueing (TQING). */
1456 mbs.param[1] = ENABLE_TARGET_FLAG|ENABLE_TQING_FLAG;
/* Channel number is carried in the upper bits of param[2]. */
1457 mbs.param[2] = bus << 7;
1458 if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) {
1459 isp_prt(isp, ISP_LOGERR, "Unable to enable Target Role on Bus %d", bus);
/* Success: remember the role state per channel. */
1463 ISP_SET_PC(isp, bus, tm_enabled, 1);
1464 isp_prt(isp, ISP_LOGINFO, "Target Role enabled on Bus %d", bus);
/*
 * Turn off the Target Role for channel 'bus'.  Uses the same
 * ENABLE TARGET MODE mailbox command as the enable path but without
 * the enable flags in param[1] (MBSINIT presumably leaves the unset
 * parameters zeroed -- TODO confirm against the MBSINIT definition),
 * then records the new role state in the per-channel soft state.
 * A no-op if the role is already off.
 */
1469 isp_disable_target_mode(ispsoftc_t *isp, int bus)
1473 ISP_GET_PC(isp, bus, tm_enabled, tm_enabled);
/* Already disabled -- nothing to do. */
1474 if (tm_enabled == 0) {
1479 MBSINIT(&mbs, MBOX_ENABLE_TARGET_MODE, MBLOGALL, 0);
/* Channel number is carried in the upper bits of param[2]. */
1480 mbs.param[2] = bus << 7;
1481 if (isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs) < 0 || mbs.param[0] != MBOX_COMMAND_COMPLETE) {
1482 isp_prt(isp, ISP_LOGERR, "Unable to disable Target Role on Bus %d", bus);
1486 ISP_SET_PC(isp, bus, tm_enabled, 0);
/* BUGFIX: log message typo -- was "disabled onon Bus". */
1487 isp_prt(isp, ISP_LOGINFO, "Target Role disabled on Bus %d", bus);
/*
 * Completion handler for ENABLE/MODIFY LUN commands: posts the CAM
 * result through isp_osinfo.rptr, which the waiting thread
 * (isp_enable_deferred / isp_disable_lun) published before sleeping.
 */
1492 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep)
1496 rptr = isp->isp_osinfo.rptr;
1497 if (lep->le_status != LUN_OK) {
1498 isp_prt(isp, ISP_LOGERR, "ENABLE/MODIFY LUN returned 0x%x", lep->le_status);
/* Failure: report the error to the waiter. */
1500 *rptr = CAM_REQ_CMP_ERR;
/* Success path. */
1505 *rptr = CAM_REQ_CMP;
/*
 * Start (or continue) a CTIO for a target-mode SCSI I/O CCB: build the
 * chip-generation-appropriate entry (CTIO7 for 24XX, CTIO2 for other
 * FC, plain CTIO for parallel SCSI), set up DMA if data is moving, and
 * hand the request to the firmware.  On any failure (invalid request,
 * missing state, resource shortage) the CCB's status is updated in
 * place for the caller to dispose of.
 */
1512 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
1516 atio_private_data_t *atp;
1517 struct ccb_scsiio *cso = &ccb->csio;
1518 uint32_t dmaresult, handle;
1519 uint8_t local[QENTRY_LEN];
1522 * Do some sanity checks.
/* A zero-length transfer must at least carry status. */
1524 if (cso->dxfer_len == 0) {
1525 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
1526 xpt_print(ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
1527 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Locate per-lun state, falling back to the wildcard lun. */
1533 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
1535 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD);
/*
 * BUGFIX: the format string ended with "in %s" but no matching
 * argument was passed (mismatched printf conversions are undefined
 * behavior); only __func__ and the tag are available, so print those.
 */
1537 xpt_print(ccb->ccb_h.path, "%s: [0x%x] cannot find tstate pointer\n", __func__, cso->tag_id);
1538 dump_tstates(isp, XS_CHANNEL(ccb));
1539 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
/* Find the ATIO private-data adjunct tracking this tag. */
1545 atp = isp_get_atpd(isp, tptr, cso->tag_id);
1547 xpt_print(ccb->ccb_h.path, "%s: [0x%x] cannot find private data adjunct\n", __func__, cso->tag_id);
1548 isp_dump_atpd(isp, tptr);
1549 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1554 xpt_print(ccb->ccb_h.path, "%s: [0x%x] stopping sending a CTIO for a dead command\n", __func__, cso->tag_id);
1555 ccb->ccb_h.status = CAM_REQ_ABORTED;
1561 * Check to make sure we're still in target mode.
1563 if ((FCPARAM(isp, XS_CHANNEL(ccb))->role & ISP_ROLE_TARGET) == 0) {
1564 xpt_print(ccb->ccb_h.path, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode\n", __func__, cso->tag_id);
1565 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1571 * Get some resources
1573 if (isp_get_pcmd(isp, ccb)) {
1574 rls_lun_statep(isp, tptr);
1575 xpt_print(ccb->ccb_h.path, "out of PCMDs\n");
/* Throttle: freeze the devq, auto-release after 250ms, requeue. */
1576 cam_freeze_devq(ccb->ccb_h.path);
1577 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
1578 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1582 qe = isp_getrqentry(isp);
1584 xpt_print(ccb->ccb_h.path, rqo, __func__);
1585 cam_freeze_devq(ccb->ccb_h.path);
1586 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
1587 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1590 memset(local, 0, QENTRY_LEN);
1593 * We're either moving data or completing a command here.
/* 24XX chips: build a CTIO7. */
1596 ct7_entry_t *cto = (ct7_entry_t *) local;
1598 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1599 cto->ct_header.rqs_entry_count = 1;
1600 cto->ct_header.rqs_seqno = 1;
1601 cto->ct_nphdl = atp->nphdl;
1602 cto->ct_rxid = atp->tag;
1603 cto->ct_iid_lo = atp->portid;
1604 cto->ct_iid_hi = atp->portid >> 16;
1605 cto->ct_oxid = atp->oxid;
1606 cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1607 cto->ct_scsi_status = cso->scsi_status;
1608 cto->ct_timeout = 120;
1609 cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1610 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1611 cto->ct_flags |= CT7_SENDSTATUS;
/* Status-only CTIO (mode 1); attach sense data if requested. */
1613 if (cso->dxfer_len == 0) {
1614 cto->ct_flags |= CT7_FLAG_MODE1 | CT7_NO_DATA;
1615 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1616 int m = min(cso->sense_len, sizeof (struct scsi_sense_data));
1617 cto->rsp.m1.ct_resplen = cto->ct_senselen = min(m, MAXRESPLEN_24XX);
1618 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, cto->ct_senselen);
1619 cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
/* Data-moving CTIO (mode 0). */
1622 cto->ct_flags |= CT7_FLAG_MODE0;
1623 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1624 cto->ct_flags |= CT7_DATA_IN;
1626 cto->ct_flags |= CT7_DATA_OUT;
1628 cto->rsp.m0.reloff = atp->bytes_xfered;
1630 * Don't overrun the limits placed on us
1632 if (atp->bytes_xfered + cso->dxfer_len > atp->orig_datalen) {
1633 cso->dxfer_len = atp->orig_datalen - atp->bytes_xfered;
1635 atp->last_xframt = cso->dxfer_len;
1636 cto->rsp.m0.ct_xfrlen = cso->dxfer_len;
/* Final CTIO: compute the residual and mark over/underrun. */
1638 if (cto->ct_flags & CT7_SENDSTATUS) {
1639 int lvl = (cso->scsi_status)? ISP_LOGTINFO : ISP_LOGTDEBUG0;
1640 cto->ct_resid = atp->orig_datalen - (atp->bytes_xfered + cso->dxfer_len);
1641 if (cto->ct_resid < 0) {
1642 cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1643 } else if (cto->ct_resid > 0) {
1644 cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1646 atp->state = ATPD_STATE_LAST_CTIO;
1647 ISP_PATH_PRT(isp, lvl, cso->ccb_h.path, "%s: CTIO7[%x] CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u\n", __func__, cto->ct_rxid,
1648 atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered);
1651 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, cso->ccb_h.path, "%s: CTIO7[%x] flags %x xfrlen %u offset %u\n", __func__, cto->ct_rxid, cto->ct_flags,
1652 cso->dxfer_len, atp->bytes_xfered);
1653 atp->state = ATPD_STATE_CTIO;
/* Other Fibre Channel chips: build a CTIO2. */
1655 } else if (IS_FC(isp)) {
1656 ct2_entry_t *cto = (ct2_entry_t *) local;
1658 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1659 cto->ct_header.rqs_entry_count = 1;
1660 cto->ct_header.rqs_seqno = 1;
1661 if (ISP_CAP_2KLOGIN(isp) == 0) {
1662 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id;
1664 cto->ct_iid = cso->init_id;
1665 if (ISP_CAP_SCCFW(isp) == 0) {
1666 cto->ct_lun = ccb->ccb_h.target_lun;
1671 cto->ct_rxid = cso->tag_id;
/* Status-only CTIO2 (mode 1). */
1672 if (cso->dxfer_len == 0) {
1673 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA | CT2_SENDSTATUS;
1674 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1675 cto->ct_resid = atp->orig_datalen - atp->bytes_xfered;
1676 if (cto->ct_resid < 0) {
1677 cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
1678 } else if (cto->ct_resid > 0) {
1679 cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
1681 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1682 int m = min(cso->sense_len, MAXRESPLEN);
1683 memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, m);
1684 cto->rsp.m1.ct_senselen = m;
1685 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1686 } else if (cso->scsi_status == SCSI_STATUS_CHECK_COND) {
1690 xpt_print(ccb->ccb_h.path, "CHECK CONDITION being sent without associated SENSE DATA for CDB=0x%x\n", atp->cdb0);
/* Data-moving CTIO2 (mode 0). */
1693 cto->ct_flags |= CT2_FLAG_MODE0;
1694 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1695 cto->ct_flags |= CT2_DATA_IN;
1697 cto->ct_flags |= CT2_DATA_OUT;
1699 cto->ct_reloff = atp->bytes_xfered;
1700 cto->rsp.m0.ct_xfrlen = cso->dxfer_len;
1702 * Don't overrun the limits placed on us
1704 if (atp->bytes_xfered + cso->dxfer_len > atp->orig_datalen) {
1705 cso->dxfer_len = atp->orig_datalen - atp->bytes_xfered;
1707 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1708 cto->ct_flags |= CT2_SENDSTATUS;
1709 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1710 cto->ct_resid = atp->orig_datalen - (atp->bytes_xfered + cso->dxfer_len);
1711 if (cto->ct_resid < 0) {
1712 cto->rsp.m0.ct_scsi_status |= CT2_DATA_OVER;
1713 } else if (cto->ct_resid > 0) {
1714 cto->rsp.m0.ct_scsi_status |= CT2_DATA_UNDER;
1717 atp->last_xframt = cso->dxfer_len;
1720 * If we're sending data and status back together,
1721 * we can't also send back sense data as well.
1723 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1726 if (cto->ct_flags & CT2_SENDSTATUS) {
1727 int lvl = (cso->scsi_status)? ISP_LOGTINFO : ISP_LOGTDEBUG0;
1728 cto->ct_flags |= CT2_CCINCR;
1729 atp->state = ATPD_STATE_LAST_CTIO;
1730 ISP_PATH_PRT(isp, lvl, cso->ccb_h.path, "%s: CTIO2[%x] CDB0=%x scsi status %x flags %x resid %d xfrlen %u offset %u\n", __func__, cto->ct_rxid,
1731 atp->cdb0, cto->rsp.m0.ct_scsi_status, cto->ct_flags, cto->ct_resid, cso->dxfer_len, atp->bytes_xfered);
1734 atp->state = ATPD_STATE_CTIO;
1735 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: CTIO2[%x] flags %x xfrlen %u offset %u\n", __func__, cto->ct_rxid, cto->ct_flags,
1736 cso->dxfer_len, atp->bytes_xfered);
1738 cto->ct_timeout = 10;
/* Parallel SCSI: build a plain CTIO. */
1740 ct_entry_t *cto = (ct_entry_t *) local;
1742 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1743 cto->ct_header.rqs_entry_count = 1;
1744 cto->ct_header.rqs_seqno = 1;
1745 cto->ct_iid = cso->init_id;
/* Channel number rides in bit 7 of the initiator id. */
1746 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1747 cto->ct_tgt = ccb->ccb_h.target_id;
1748 cto->ct_lun = ccb->ccb_h.target_lun;
/* The firmware handle was packed into the upper half of tag_id. */
1749 cto->ct_fwhandle = cso->tag_id >> 16;
1750 if (AT_HAS_TAG(cso->tag_id)) {
1751 cto->ct_tag_val = cso->tag_id;
1752 cto->ct_flags |= CT_TQAE;
1754 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1755 cto->ct_flags |= CT_NODISC;
1757 if (cso->dxfer_len == 0) {
1758 cto->ct_flags |= CT_NO_DATA;
1759 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1760 cto->ct_flags |= CT_DATA_IN;
1762 cto->ct_flags |= CT_DATA_OUT;
1764 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1765 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1766 cto->ct_scsi_status = cso->scsi_status;
1767 cto->ct_resid = cso->resid;
1768 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: CTIO[%x] scsi status %x resid %d tag_id %x\n", __func__,
1769 cto->ct_fwhandle, cso->scsi_status, cso->resid, cso->tag_id);
1771 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1772 cto->ct_timeout = 10;
/* Reserve a handle so the completion side can find this CCB again. */
1775 if (isp_allocate_xs_tgt(isp, ccb, &handle)) {
1776 xpt_print(ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1777 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1783 * Call the dma setup routines for this entry (and any subsequent
1784 * CTIOs) if there's data to move, and then tell the f/w it's got
1785 * new things to play with. As with isp_start's usage of DMA setup,
1786 * any swizzling is done in the machine dependent layer. Because
1787 * of this, we put the request onto the queue area first in native
/* Stash the handle in whichever CTIO variant we built above. */
1792 ct7_entry_t *cto = (ct7_entry_t *) local;
1793 cto->ct_syshandle = handle;
1794 } else if (IS_FC(isp)) {
1795 ct2_entry_t *cto = (ct2_entry_t *) local;
1796 cto->ct_syshandle = handle;
1798 ct_entry_t *cto = (ct_entry_t *) local;
1799 cto->ct_syshandle = handle;
1802 dmaresult = ISP_DMASETUP(isp, cso, (ispreq_t *) local);
1803 if (dmaresult == CMD_QUEUED) {
1805 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1806 rls_lun_statep(isp, tptr);
/* DMA setup failed: requeue on EAGAIN, otherwise hard error. */
1809 if (dmaresult == CMD_EAGAIN) {
1810 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1812 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1814 isp_destroy_tgt_handle(isp, handle);
1816 rls_lun_statep(isp, tptr);
1817 isp_free_pcmd(isp, ccb);
/*
 * timeout(9) callback: retry queueing the replacement ATIO for a CCB
 * after an earlier isp_target_putback_atio() found the request queue
 * full.
 */
1822 isp_refire_putback_atio(void *arg)
1824 union ccb *ccb = arg;
1825 ispsoftc_t *isp = XS_ISP(ccb);
1827 isp_target_putback_atio(ccb);
/*
 * Push a replacement ATIO back to the firmware after a command has
 * finished, so the firmware keeps ATIO resources to hand out.  If the
 * request queue is full, re-arm via timeout(9) and retry later;
 * otherwise build the generation-appropriate ATIO, sync the request
 * queue, and complete the CCB.
 */
1832 isp_target_putback_atio(union ccb *ccb)
1835 struct ccb_scsiio *cso;
1840 qe = isp_getrqentry(isp);
/* Request queue full: retry in a few ticks via the refire callback. */
1842 xpt_print(ccb->ccb_h.path, rqo, __func__);
1843 (void) timeout(isp_refire_putback_atio, ccb, 10);
1846 memset(qe, 0, QENTRY_LEN);
/* Fibre Channel: replacement ATIO2. */
1849 at2_entry_t local, *at = &local;
1850 ISP_MEMZERO(at, sizeof (at2_entry_t));
1851 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1852 at->at_header.rqs_entry_count = 1;
/* SCC firmware carries a 16-bit lun in the separate scclun field. */
1853 if (ISP_CAP_SCCFW(isp)) {
1854 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1856 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1858 at->at_status = CT_OK;
1859 at->at_rxid = cso->tag_id;
1860 at->at_iid = cso->ccb_h.target_id;
1861 isp_put_atio2(isp, at, qe);
/* Parallel SCSI: replacement ATIO. */
1863 at_entry_t local, *at = &local;
1864 ISP_MEMZERO(at, sizeof (at_entry_t));
1865 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1866 at->at_header.rqs_entry_count = 1;
1867 at->at_iid = cso->init_id;
/* Channel number rides in bit 7 of the initiator id. */
1868 at->at_iid |= XS_CHANNEL(ccb) << 7;
1869 at->at_tgt = cso->ccb_h.target_id;
1870 at->at_lun = cso->ccb_h.target_lun;
1871 at->at_status = CT_OK;
/* Recover the tag value and firmware handle packed into tag_id. */
1872 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1873 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1874 isp_put_atio(isp, at, qe);
1876 ISP_TDQE(isp, "isp_target_putback_atio", isp->isp_reqidx, qe);
/* Tell the f/w the request queue advanced, then finish the CCB. */
1877 ISP_SYNC_REQUEST(isp);
1878 isp_complete_ctio(ccb);
/*
 * Final completion for a target-mode CCB: normalize an in-progress
 * status to CAM_REQ_CMP, clear the SIM-queued flag, and release the
 * per-command resources.
 */
1882 isp_complete_ctio(union ccb *ccb)
1884 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1885 ccb->ccb_h.status |= CAM_REQ_CMP;
1887 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1888 isp_free_pcmd(XS_ISP(ccb), ccb);
1893 * Handle ATIO stuff that the generic code can't.
1894 * This means handling CDBs.
/*
 * Handle an ATIO from parallel-SCSI firmware: validate the firmware
 * status, look up per-lun state (falling back to the wildcard lun),
 * populate an accept-tio CCB and its private-data adjunct, and pass
 * the received CDB up to CAM.
 */
1898 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
1902 struct ccb_accept_tio *atiop;
1903 atio_private_data_t *atp;
1906 * The firmware status (except for the QLTM_SVALID bit)
1907 * indicates why this ATIO was sent to us.
1909 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1911 * If the DISCONNECTS DISABLED bit is set in the flags field,
1912 * we're still connected on the SCSI bus.
1914 status = aep->at_status;
1915 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1917 * Bus Phase Sequence error. We should have sense data
1918 * suggested by the f/w. I'm not sure quite yet what
1919 * to do about this for CAM.
1921 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1922 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Anything other than a CDB delivery is a firmware anomaly. */
1925 if ((status & ~QLTM_SVALID) != AT_CDB) {
1926 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform", status);
1927 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1931 bus = GET_BUS_VAL(aep->at_iid);
1932 tptr = get_lun_statep(isp, bus, aep->at_lun);
1934 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1937 * Because we can't autofeed sense data back with
1938 * a command for parallel SCSI, we can't give back
1939 * a CHECK CONDITION. We'll give back a BUSY status
1940 * instead. This works out okay because the only
1941 * time we should, in fact, get this, is in the
1942 * case that somebody configured us without the
1943 * blackhole driver, so they get what they deserve.
1945 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Need both a queued accept-tio CCB and a free adjunct to proceed. */
1950 atp = isp_get_atpd(isp, tptr, 0);
1951 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1952 if (atiop == NULL || atp == NULL) {
1954 * Because we can't autofeed sense data back with
1955 * a command for parallel SCSI, we can't give back
1956 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1957 * instead. This works out okay because the only time we
1958 * should, in fact, get this, is in the case that we've
1961 xpt_print(tptr->owner, "no %s for lun %d from initiator %d\n", (atp == NULL && atiop == NULL)? "ATIOs *or* ATPS" :
1962 ((atp == NULL)? "ATPs" : "ATIOs"), aep->at_lun, aep->at_iid);
1963 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Return the adjunct if we got one but no accept-tio CCB. */
1965 isp_put_atpd(isp, tptr, atp);
1967 rls_lun_statep(isp, tptr);
1970 atp->tag = aep->at_tag_val;
1971 if (atp->tag == 0) {
1974 atp->state = ATPD_STATE_ATIO;
1975 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1977 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count);
1978 atiop->ccb_h.target_id = aep->at_tgt;
1979 atiop->ccb_h.target_lun = aep->at_lun;
/* Propagate the firmware's disconnect-disabled indication to CAM. */
1980 if (aep->at_flags & AT_NODISC) {
1981 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1983 atiop->ccb_h.flags = 0;
/* Copy any firmware-suggested sense data into the CCB. */
1986 if (status & QLTM_SVALID) {
1987 size_t amt = ISP_MIN(QLTM_SENSELEN, sizeof (atiop->sense_data));
1988 atiop->sense_len = amt;
1989 ISP_MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1991 atiop->sense_len = 0;
1994 atiop->init_id = GET_IID_VAL(aep->at_iid);
1995 atiop->cdb_len = aep->at_cdblen;
1996 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1997 atiop->ccb_h.status = CAM_CDB_RECVD;
1999 * Construct a tag 'id' based upon tag value (which may be 0..255)
2000 * and the handle (which we have to preserve).
2002 atiop->tag_id = atp->tag;
2003 if (aep->at_flags & AT_TQAE) {
2004 atiop->tag_action = aep->at_tag_type;
2005 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
/* Initialize the adjunct's transfer bookkeeping for this command. */
2007 atp->orig_datalen = 0;
2008 atp->bytes_xfered = 0;
2009 atp->last_xframt = 0;
2010 atp->lun = aep->at_lun;
2011 atp->nphdl = aep->at_iid;
2012 atp->portid = PORT_NONE;
2014 atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
2015 atp->tattr = aep->at_tag_type;
2016 atp->state = ATPD_STATE_CAM;
2017 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO[%x] CDB=0x%x lun %d\n", aep->at_tag_val, atp->cdb0, atp->lun);
2018 rls_lun_statep(isp, tptr);
/*
 * Handle an ATIO2 from (pre-24XX) Fibre Channel firmware: validate the
 * status, first replay any commands previously starved of resources,
 * then populate an accept-tio CCB and its adjunct, learn the
 * initiator's WWN/loop-id mapping, and pass the CDB up to CAM.
 * Commands that cannot get resources are parked on the lun's restart
 * queue for later replay.
 */
2022 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep)
2027 struct ccb_accept_tio *atiop;
2029 atio_private_data_t *atp;
2030 inot_private_data_t *ntp;
2033 * The firmware status (except for the QLTM_SVALID bit)
2034 * indicates why this ATIO was sent to us.
2036 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
2038 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
2039 isp_prt(isp, ISP_LOGWARN, "bogus atio (0x%x) leaked to platform", aep->at_status);
2040 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* SCC firmware carries a 16-bit lun in a separate field. */
2044 if (ISP_CAP_SCCFW(isp)) {
2045 lun = aep->at_scclun;
2049 if (ISP_CAP_2KLOGIN(isp)) {
2050 nphdl = ((at2e_entry_t *)aep)->at_iid;
2052 nphdl = aep->at_iid;
2054 tptr = get_lun_statep(isp, 0, lun);
2056 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
/*
 * No lun state at all: reject with CHECK CONDITION, sense key 0x5
 * (ILLEGAL REQUEST), ASC 0x25 (LOGICAL UNIT NOT SUPPORTED).
 */
2058 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
2059 isp_endcmd(isp, aep, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
2065 * Start any commands pending resources first.
2067 if (tptr->restart_queue) {
2068 inot_private_data_t *restart_queue = tptr->restart_queue;
2069 tptr->restart_queue = NULL;
/* Replay each stashed ATIO2 (note: recursive call below). */
2070 while (restart_queue) {
2071 ntp = restart_queue;
2072 restart_queue = ntp->rd.nt.nt_hba;
2073 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at2_entry_t *)ntp->rd.data)->at_rxid);
2074 isp_handle_platform_atio2(isp, (at2_entry_t *) ntp->rd.data);
2075 isp_put_ntpd(isp, tptr, ntp);
2077 * If a recursion caused the restart queue to start to fill again,
2078 * stop and splice the new list on top of the old list and restore
2079 * it and go to noresrc.
2081 if (tptr->restart_queue) {
2082 ntp = tptr->restart_queue;
2083 tptr->restart_queue = restart_queue;
2084 while (restart_queue->rd.nt.nt_hba) {
2085 restart_queue = restart_queue->rd.nt.nt_hba;
2087 restart_queue->rd.nt.nt_hba = ntp;
/* Need a queued accept-tio CCB and a free adjunct to proceed. */
2093 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
2094 if (atiop == NULL) {
2098 atp = isp_get_atpd(isp, tptr, 0);
2103 atp->tag = aep->at_rxid;
2104 atp->state = ATPD_STATE_ATIO;
2105 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
2107 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count);
2108 atiop->ccb_h.target_id = FCPARAM(isp, 0)->isp_loopid;
2109 atiop->ccb_h.target_lun = lun;
2112 * We don't get 'suggested' sense data as we do with SCSI cards.
2114 atiop->sense_len = 0;
2115 if (ISP_CAP_2KLOGIN(isp)) {
2117 * NB: We could not possibly have 2K logins if we
2118 * NB: also did not have SCC FW.
2120 atiop->init_id = ((at2e_entry_t *)aep)->at_iid;
2122 atiop->init_id = aep->at_iid;
2126 * If we're not in the port database, add ourselves.
2128 if (!IS_2100(isp) && isp_find_pdb_by_loopid(isp, 0, atiop->init_id, &lp) == 0) {
/* Reassemble the initiator WWPN from the four 16-bit words. */
2130 (((uint64_t) aep->at_wwpn[0]) << 48) |
2131 (((uint64_t) aep->at_wwpn[1]) << 32) |
2132 (((uint64_t) aep->at_wwpn[2]) << 16) |
2133 (((uint64_t) aep->at_wwpn[3]) << 0);
2135 * However, make sure we delete ourselves if otherwise
2136 * we were there but at a different loop id.
2138 if (isp_find_pdb_by_wwn(isp, 0, iid, &lp)) {
2139 isp_del_wwn_entry(isp, 0, iid, lp->handle, lp->portid);
2141 isp_add_wwn_entry(isp, 0, iid, atiop->init_id, PORT_ANY);
2143 atiop->cdb_len = ATIO2_CDBLEN;
2144 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
2145 atiop->ccb_h.status = CAM_CDB_RECVD;
2146 atiop->tag_id = atp->tag;
/* Map firmware task attributes onto CAM tag actions. */
2147 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
2148 case ATIO2_TC_ATTR_SIMPLEQ:
2149 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2150 atiop->tag_action = MSG_SIMPLE_Q_TAG;
2152 case ATIO2_TC_ATTR_HEADOFQ:
2153 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2154 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
2156 case ATIO2_TC_ATTR_ORDERED:
2157 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2158 atiop->tag_action = MSG_ORDERED_Q_TAG;
2160 case ATIO2_TC_ATTR_ACAQ: /* ?? */
2161 case ATIO2_TC_ATTR_UNTAGGED:
2163 atiop->tag_action = 0;
/* Initialize the adjunct's transfer bookkeeping for this command. */
2167 atp->orig_datalen = aep->at_datalen;
2168 atp->bytes_xfered = 0;
2169 atp->last_xframt = 0;
2171 atp->nphdl = atiop->init_id;
2172 atp->sid = PORT_ANY;
2173 atp->oxid = aep->at_oxid;
2174 atp->cdb0 = aep->at_cdb[0];
2175 atp->tattr = aep->at_taskflags & ATIO2_TC_ATTR_MASK;
2176 atp->state = ATPD_STATE_CAM;
2177 xpt_done((union ccb *)atiop);
2178 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO2[%x] CDB=0x%x lun %d datalen %u\n", aep->at_rxid, atp->cdb0, lun, atp->orig_datalen);
2179 rls_lun_statep(isp, tptr);
/* noresrc: park the raw ATIO2 on the restart queue for later replay. */
2182 ntp = isp_get_ntpd(isp, tptr);
2184 rls_lun_statep(isp, tptr);
2185 isp_endcmd(isp, aep, nphdl, 0, SCSI_STATUS_BUSY, 0);
2188 memcpy(ntp->rd.data, aep, QENTRY_LEN);
2189 ntp->rd.nt.nt_hba = tptr->restart_queue;
2190 tptr->restart_queue = ntp;
2191 rls_lun_statep(isp, tptr);
/*
 * Handle an ATIO7 from 24XX Fibre Channel firmware: decode D_ID/S_ID
 * and the lun from the FCP_CMND IU, resolve the channel and the
 * initiator's port-database entry, replay any resource-starved
 * commands, then populate an accept-tio CCB and its adjunct and pass
 * the CDB up to CAM.  Resource shortages park the raw ATIO on the
 * lun's restart queue for later replay.
 */
2195 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
2198 uint16_t lun, chan, nphdl = NIL_HANDLE;
2200 uint64_t wwn = INI_NONE;
2203 struct ccb_accept_tio *atiop;
2204 atio_private_data_t *atp = NULL;
2205 atio_private_data_t *oatp;
2206 inot_private_data_t *ntp;
/* Decode the 24-bit destination/source port ids and the lun. */
2208 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
2209 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
2210 lun = (aep->at_cmnd.fcp_cmnd_lun[0] << 8) | aep->at_cmnd.fcp_cmnd_lun[1];
2213 * Find the N-port handle, and Virtual Port Index for this command.
2215 * If we can't, we're somewhat in trouble because we can't actually respond w/o that information.
2216 * We also, as a matter of course, need to know the WWN of the initiator too.
2218 if (ISP_CAP_MULTI_ID(isp)) {
2220 * Find the right channel based upon D_ID
2222 isp_find_chan_by_did(isp, did, &chan);
2224 if (chan == ISP_NOCHAN) {
2228 * If we don't recognizer our own D_DID, terminate the exchange, unless we're within 2 seconds of startup
2229 * It's a bit tricky here as we need to stash this command *somewhere*.
2232 if (NANOTIME_SUB(&isp->isp_init_time, &now) > 2000000000ULL) {
2233 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- dropping", __func__, aep->at_rxid, did);
2234 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0);
/* Early after init: defer the command rather than terminate it. */
2237 tptr = get_lun_statep(isp, 0, 0);
2239 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
2241 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel and no tptr- dropping", __func__, aep->at_rxid, did);
2242 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN, ECMD_TERMINATE, 0);
2246 isp_prt(isp, ISP_LOGWARN, "%s: [RX_ID 0x%x] D_ID %x not found on any channel- deferring", __func__, aep->at_rxid, did);
2249 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x", __func__, aep->at_rxid, did, chan, sid);
2255 * Find the PDB entry for this initiator
2257 if (isp_find_pdb_by_sid(isp, chan, sid, &lp) == 0) {
2259 * If we're not in the port database terminate the exchange.
2261 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
2262 __func__, aep->at_rxid, did, chan, sid);
2263 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
2270 * Get the tstate pointer
2272 tptr = get_lun_statep(isp, chan, lun);
2274 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
/*
 * No lun state: reject with CHECK CONDITION, sense key 0x5
 * (ILLEGAL REQUEST), ASC 0x25 (LOGICAL UNIT NOT SUPPORTED).
 */
2276 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] no state pointer for lun %d or wildcard", aep->at_rxid, lun);
2277 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
2283 * Start any commands pending resources first.
2285 if (tptr->restart_queue) {
2286 inot_private_data_t *restart_queue = tptr->restart_queue;
2287 tptr->restart_queue = NULL;
/* Replay each stashed ATIO7 (note: recursive call below). */
2288 while (restart_queue) {
2289 ntp = restart_queue;
2290 restart_queue = ntp->rd.nt.nt_hba;
2291 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restarting resrc deprived %x", __func__, ((at7_entry_t *)ntp->rd.data)->at_rxid);
2292 isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->rd.data);
2293 isp_put_ntpd(isp, tptr, ntp);
2295 * If a recursion caused the restart queue to start to fill again,
2296 * stop and splice the new list on top of the old list and restore
2297 * it and go to noresrc.
2299 if (tptr->restart_queue) {
2300 isp_prt(isp, ISP_LOGTDEBUG0, "%s: restart queue refilling", __func__);
2301 if (restart_queue) {
2302 ntp = tptr->restart_queue;
2303 tptr->restart_queue = restart_queue;
2304 while (restart_queue->rd.nt.nt_hba) {
2305 restart_queue = restart_queue->rd.nt.nt_hba;
2307 restart_queue->rd.nt.nt_hba = ntp;
2315 * If the f/w is out of resources, just send a BUSY status back.
2317 if (aep->at_rxid == AT7_NORESRC_RXID) {
2318 rls_lun_statep(isp, tptr);
2319 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
2324 * If we're out of resources, just send a BUSY status back.
2326 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
2327 if (atiop == NULL) {
2328 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
2332 atp = isp_get_atpd(isp, tptr, 0);
2334 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
/* An RX_ID already being tracked means a tag was reused on us. */
2337 oatp = isp_get_atpd(isp, tptr, aep->at_rxid);
2339 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] tag wraparound in isp_handle_platforms_atio7 (N-Port Handle 0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d\n",
2340 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
2342 * It's not a "no resource" condition- but we can treat it like one
2346 atp->tag = aep->at_rxid;
2347 atp->state = ATPD_STATE_ATIO;
2348 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
2350 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, atiop->ccb_h.path, "Take FREE ATIO count now %d\n", tptr->atio_count);
2351 atiop->init_id = nphdl;
2352 atiop->ccb_h.target_id = FCPARAM(isp, chan)->isp_loopid;
2353 atiop->ccb_h.target_lun = lun;
2354 atiop->sense_len = 0;
/* Extra CDB length beyond the fixed-size CDB is not supported: warn. */
2355 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
2357 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
2359 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
2360 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
2361 atiop->cdb_len = cdbxlen;
2362 atiop->ccb_h.status = CAM_CDB_RECVD;
2363 atiop->tag_id = atp->tag;
/* Map FCP task attributes onto CAM tag actions. */
2364 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
2365 case FCP_CMND_TASK_ATTR_SIMPLE:
2366 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2367 atiop->tag_action = MSG_SIMPLE_Q_TAG;
2369 case FCP_CMND_TASK_ATTR_HEAD:
2370 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2371 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
2373 case FCP_CMND_TASK_ATTR_ORDERED:
2374 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
2375 atiop->tag_action = MSG_ORDERED_Q_TAG;
2379 case FCP_CMND_TASK_ATTR_ACA:
2380 case FCP_CMND_TASK_ATTR_UNTAGGED:
2381 atiop->tag_action = 0;
/* Initialize the adjunct's transfer bookkeeping for this command. */
2384 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
2385 atp->bytes_xfered = 0;
2386 atp->last_xframt = 0;
2390 atp->oxid = aep->at_hdr.ox_id;
2391 atp->rxid = aep->at_hdr.rx_id;
2392 atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
2393 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
2394 atp->state = ATPD_STATE_CAM;
2395 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "ATIO7[%x] CDB=0x%x lun %d datalen %u\n", aep->at_rxid, atp->cdb0, lun, atp->orig_datalen);
2396 xpt_done((union ccb *)atiop);
2397 rls_lun_statep(isp, tptr);
/* noresrc: return the adjunct (if taken) and park the raw ATIO. */
2401 isp_put_atpd(isp, tptr, atp);
2403 ntp = isp_get_ntpd(isp, tptr);
2405 rls_lun_statep(isp, tptr);
2406 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
2409 memcpy(ntp->rd.data, aep, QENTRY_LEN);
2410 ntp->rd.nt.nt_hba = tptr->restart_queue;
2411 tptr->restart_queue = ntp;
2412 rls_lun_statep(isp, tptr);
/*
 * Process a completed CTIO/CTIO2/CTIO7 from the firmware: recover the
 * originating CCB from the syshandle, update transfer accounting in
 * the ATIO adjunct, translate firmware status into CAM status, and --
 * when this was the final CTIO of the exchange -- complete the CCB
 * (after replenishing the firmware's ATIO on error for pre-24XX).
 */
2416 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
2419 int sentstatus, ok, notify_cam, resid = 0;
2420 tstate_t *tptr = NULL;
2421 atio_private_data_t *atp = NULL;
2423 uint32_t tval, handle;
2426 * CTIO handles are 16 bits.
2427 * CTIO2 and CTIO7 are 32 bits.
2431 handle = ((ct_entry_t *)arg)->ct_syshandle;
2433 handle = ((ct2_entry_t *)arg)->ct_syshandle;
2435 ccb = isp_find_xs_tgt(isp, handle);
2437 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, arg);
2440 isp_destroy_tgt_handle(isp, handle);
2441 bus = XS_CHANNEL(ccb);
2442 tptr = get_lun_statep(isp, bus, XS_LUN(ccb));
2444 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
2446 KASSERT((tptr != NULL), ("cannot get state pointer"));
2447 if (isp->isp_nactive) {
/* 24XX chips: CTIO7 completion. */
2451 ct7_entry_t *ct = arg;
2453 atp = isp_get_atpd(isp, tptr, ct->ct_rxid);
2455 rls_lun_statep(isp, tptr);
2456 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ct->ct_rxid);
2460 sentstatus = ct->ct_flags & CT7_SENDSTATUS;
2461 ok = (ct->ct_nphdl == CT7_OK);
2462 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
2463 ccb->ccb_h.status |= CAM_SENT_SENSE;
2465 notify_cam = ct->ct_header.rqs_seqno & 0x1;
/* Fold this CTIO's residual into the running transfer count. */
2466 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA) {
2467 resid = ct->ct_resid;
2468 atp->bytes_xfered += (atp->last_xframt - resid);
2469 atp->last_xframt = 0;
2471 if (ct->ct_nphdl == CT_HBA_RESET) {
2475 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2477 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
2480 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] sts 0x%x flg 0x%x sns %d resid %d %s", __func__,
2481 ct->ct_rxid, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
2482 atp->state = ATPD_STATE_PDON; /* XXX: should really come after isp_complete_ctio */
/* Other Fibre Channel chips: CTIO2 completion. */
2483 } else if (IS_FC(isp)) {
2484 ct2_entry_t *ct = arg;
2486 atp = isp_get_atpd(isp, tptr, ct->ct_rxid);
2488 rls_lun_statep(isp, tptr);
2489 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ct->ct_rxid);
2492 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
2493 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
2494 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
2495 ccb->ccb_h.status |= CAM_SENT_SENSE;
2497 notify_cam = ct->ct_header.rqs_seqno & 0x1;
2498 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
2499 resid = ct->ct_resid;
2500 atp->bytes_xfered += (atp->last_xframt - resid);
2501 atp->last_xframt = 0;
2503 if (ct->ct_status == CT_HBA_RESET) {
2507 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2509 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
2511 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s", __func__,
2512 ct->ct_rxid, ct->ct_status, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
2514 atp->state = ATPD_STATE_PDON; /* XXX: should really come after isp_complete_ctio */
/* Parallel SCSI: plain CTIO completion. */
2516 ct_entry_t *ct = arg;
2517 sentstatus = ct->ct_flags & CT_SENDSTATUS;
2518 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
2520 * We *ought* to be able to get back to the original ATIO
2521 * here, but for some reason this gets lost. It's just as
2522 * well because it's squirrelled away as part of periph
2525 * We can live without it as long as we continue to use
2526 * the auto-replenish feature for CTIOs.
2528 notify_cam = ct->ct_header.rqs_seqno & 0x1;
2529 if (ct->ct_status == (CT_HBA_RESET & 0xff)) {
2533 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2535 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
/* Firmware supplied sense data; copy it into the CCB. */
2536 } else if (ct->ct_status & QLTM_SVALID) {
2537 char *sp = (char *)ct;
2538 sp += CTIO_SENSE_OFFSET;
2539 ccb->csio.sense_len = min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
2540 ISP_MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
2541 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2543 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
2544 resid = ct->ct_resid;
2546 isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO[%x] tag %x S_ID 0x%x lun %d sts %x flg %x resid %d %s", __func__,
2547 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun, ct->ct_status, ct->ct_flags, resid, sentstatus? "FIN" : "MID");
2548 tval = ct->ct_fwhandle;
2550 ccb->csio.resid += resid;
2553 * We're here either because intermediate data transfers are done
2554 * and/or the final status CTIO (which may have joined with a
2555 * Data Transfer) is done.
2557 * In any case, for this platform, the upper layers figure out
2558 * what to do next, so all we do here is collect status and
2559 * pass information along. Any DMA handles have already been
/* Intermediate CTIO: nothing more to tell CAM yet. */
2562 if (notify_cam == 0) {
2563 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
2567 rls_lun_statep(isp, tptr);
2569 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done", (sentstatus)? " FINAL " : "MIDTERM ", tval);
/* On error for pre-24XX chips, replenish the firmware's ATIO first. */
2571 if (!ok && !IS_24XX(isp)) {
2572 isp_target_putback_atio(ccb);
2574 isp_complete_ctio(ccb);
/*
 * isp_handle_platform_notify_scsi:
 *	Immediate-notify handler for parallel SCSI (non-FC) adapters.
 *	The only visible action is acknowledging the notify back to the
 *	chip; nothing is propagated up to CAM from here.
 */
2579 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inot)
2581 (void) isp_notify_ack(isp, inot);
/*
 * isp_handle_platform_notify_fc:
 *	Immediate-notify handler for Fibre Channel adapters (pre-24XX).
 *	Dispatches on inp->in_status: port logout/change events are logged,
 *	a global LOGO flushes the WWN database, and an ABORT TASK is turned
 *	into an isp_notify_t that is handed to isp_handle_platform_target_tmf().
 *	The incoming notify entry is acknowledged back to the chip at the end.
 */
2585 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp)
2588 switch (inp->in_status) {
2589 case IN_PORT_LOGOUT:
2591 * XXX: Need to delete this initiator's WWN from the database
2592 * XXX: Need to send this LOGOUT upstream
2594 isp_prt(isp, ISP_LOGWARN, "port logout of S_ID 0x%x", inp->in_iid);
2596 case IN_PORT_CHANGED:
2597 isp_prt(isp, ISP_LOGWARN, "port changed for S_ID 0x%x", inp->in_iid);
2599 case IN_GLOBAL_LOGO:
/* Global LOGO: every initiator on channel 0 is now invalid. */
2600 isp_del_all_wwn_entries(isp, 0);
2601 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
/*
 * ABORT TASK handling: locate the lun state and the per-command
 * (ATIO) private data for the command being aborted.
 */
2609 atio_private_data_t *atp;
2611 struct ccb_immediate_notify *inot = NULL;
/* The lun encoding depends on whether SCC (SCSI-3) firmware is running. */
2613 if (ISP_CAP_SCCFW(isp)) {
2614 lun = inp->in_scclun;
/* 2K-login firmware carries the initiator handle in the extended entry. */
2618 if (ISP_CAP_2KLOGIN(isp)) {
2619 loopid = ((in_fcentry_e_t *)inp)->in_iid;
2621 loopid = inp->in_iid;
2623 if (isp_find_pdb_by_loopid(isp, 0, loopid, &lp)) {
/* Look up the lun state; fall back to the wildcard lun listener. */
2628 tptr = get_lun_statep(isp, 0, lun);
2630 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
2632 isp_prt(isp, ISP_LOGWARN, "ABORT TASK for lun %u- but no tstate", lun);
2636 atp = isp_get_atpd(isp, tptr, inp->in_seqid);
/* Take a free immediate-notify CCB from this lun's list, if any. */
2639 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
2640 isp_prt(isp, ISP_LOGTDEBUG0, "ABORT TASK RX_ID %x WWN 0x%016llx state %d", inp->in_seqid, (unsigned long long) wwn, atp->state);
2643 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
2644 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count);
2646 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, tptr->owner, "out of INOT structures\n");
2649 ISP_PATH_PRT(isp, ISP_LOGWARN, tptr->owner, "abort task RX_ID %x from wwn 0x%016llx, state unknown\n", inp->in_seqid, wwn);
/*
 * Build a stack-local isp_notify_t describing the ABORT TASK and
 * push it through the common TMF path.  nt_lreserved carries the
 * INOT (may be NULL) for eventual delivery to the peripheral.
 */
2652 isp_notify_t tmp, *nt = &tmp;
2653 ISP_MEMZERO(nt, sizeof (isp_notify_t));
2655 nt->nt_tgt = FCPARAM(isp, 0)->isp_wwpn;
2657 nt->nt_nphdl = loopid;
2658 nt->nt_sid = PORT_ANY;
2659 nt->nt_did = PORT_ANY;
2661 nt->nt_need_ack = 1;
2663 nt->nt_ncode = NT_ABORT_TASK;
2664 nt->nt_lreserved = inot;
2665 isp_handle_platform_target_tmf(isp, nt);
2668 rls_lun_statep(isp, tptr);
/* Acknowledge the original notify entry back to the chip. */
2675 (void) isp_notify_ack(isp, inp);
/*
 * isp_handle_platform_notify_24xx:
 *	Immediate-notify handler for 24XX (4Gb+) Fibre Channel chips.
 *	Handles received ELS frames (LOGO/PLOGI/PRLI and others), port
 *	logout/change, LIP reset, link reset/failure and SRR events.
 *	For PLOGI/LOGO on sufficiently new firmware the port WWN is
 *	recovered from the raw (unswizzled) IOCB and the WWN database
 *	is updated accordingly.  Most paths end by acking the notify.
 */
2680 isp_handle_platform_notify_24xx(ispsoftc_t *isp, in_fcentry_24xx_t *inot)
2685 uint8_t *ptr = NULL;
2688 nphdl = inot->in_nphdl;
2689 if (nphdl != NIL_HANDLE) {
/* Reassemble the 24-bit source Port ID from the split fields. */
2690 portid = inot->in_portid_hi << 16 | inot->in_portid_lo;
2695 switch (inot->in_status) {
2696 case IN24XX_ELS_RCVD:
2699 int chan = ISP_GET_VPIDX(isp, inot->in_vpidx);
2702 * Note that we're just getting notification that an ELS was received
2703 * (possibly with some associated information sent upstream). This is
2704 * *not* the same as being given the ELS frame to accept or reject.
2706 switch (inot->in_status_subcode) {
/* LOGO: extract the WWPN byte-by-byte from the unswizzled IOCB. */
2709 if (ISP_FW_NEWER_THAN(isp, 4, 0, 25)) {
2710 ptr = (uint8_t *)inot; /* point to unswizzled entry! */
2711 wwn = (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF]) << 56) |
2712 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+1]) << 48) |
2713 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+2]) << 40) |
2714 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+3]) << 32) |
2715 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+4]) << 24) |
2716 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+5]) << 16) |
2717 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+6]) << 8) |
2718 (((uint64_t) ptr[IN24XX_LOGO_WWPN_OFF+7]));
2722 isp_del_wwn_entry(isp, chan, wwn, nphdl, portid);
2730 * Treat PRLI the same as PLOGI and make a database entry for it.
2732 if (inot->in_status_subcode == PLOGI)
/* PLOGI/PRLI: same WWPN extraction, but an entry is added. */
2736 if (ISP_FW_NEWER_THAN(isp, 4, 0, 25)) {
2737 ptr = (uint8_t *)inot; /* point to unswizzled entry! */
2738 wwn = (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF]) << 56) |
2739 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+1]) << 48) |
2740 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+2]) << 40) |
2741 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+3]) << 32) |
2742 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+4]) << 24) |
2743 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+5]) << 16) |
2744 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+6]) << 8) |
2745 (((uint64_t) ptr[IN24XX_PLOGI_WWPN_OFF+7]));
2749 isp_add_wwn_entry(isp, chan, wwn, nphdl, portid);
/* Any other ELS subcode is just named for the log message. */
2758 ISP_SNPRINTF(buf, sizeof (buf), "ELS 0x%x", inot->in_status_subcode);
/*
 * If the chip expects a PUREX response we cannot supply one here;
 * flag it loudly instead of silently acking.
 */
2762 if (inot->in_flags & IN24XX_FLAG_PUREX_IOCB) {
2763 isp_prt(isp, ISP_LOGERR, "%s Chan %d ELS N-port handle %x PortID 0x%06x marked as needing a PUREX response", msg, chan, nphdl, portid);
2766 isp_prt(isp, ISP_LOGTDEBUG0, "%s Chan %d ELS N-port handle %x PortID 0x%06x RX_ID 0x%x OX_ID 0x%x", msg, chan, nphdl, portid,
2767 inot->in_rxid, inot->in_oxid);
2768 (void) isp_notify_ack(isp, inot);
2772 case IN24XX_PORT_LOGOUT:
2773 ptr = "PORT LOGOUT";
/* Drop the port database entry for the departing N-port handle. */
2774 if (isp_find_pdb_by_loopid(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), nphdl, &lp)) {
2775 isp_del_wwn_entry(isp, ISP_GET_VPIDX(isp, inot->in_vpidx), lp->port_wwn, nphdl, lp->portid);
2778 case IN24XX_PORT_CHANGED:
2780 ptr = "PORT CHANGED";
2783 case IN24XX_LIP_RESET:
2787 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (sub-status 0x%x) for N-port handle 0x%x", ISP_GET_VPIDX(isp, inot->in_vpidx), ptr, inot->in_status_subcode, nphdl);
2790 * All subcodes here are irrelevant. What is relevant
2791 * is that we need to terminate all active commands from
2792 * this initiator (known by N-port handle).
2794 /* XXX IMPLEMENT XXX */
2795 (void) isp_notify_ack(isp, inot);
2798 case IN24XX_LINK_RESET:
2799 case IN24XX_LINK_FAILED:
2800 case IN24XX_SRR_RCVD:
/* Link events and SRR: nothing to do locally, just ack. */
2802 (void) isp_notify_ack(isp, inot);
/*
 * isp_handle_platform_target_notify_ack:
 *	Acknowledge (or respond to) a target-mode notify described by an
 *	isp_notify_t.  Several special cases are handled before the
 *	generic ack:
 *	  - a 24XX Task Management Function that arrived as an ATIO7 is
 *	    answered with a MODE1 status CTIO7;
 *	  - a received ABTS is answered by terminating the task (CTIO7
 *	    with CT7_TERMINATE) and then sending the BA_ACC/BA_RJT;
 *	  - global and per-port logouts update the WWN database.
 *	Drops the notify if the h/w is not in the running state.
 */
2808 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp)
2811 if (isp->isp_state != ISP_RUNSTATE) {
2812 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
2817 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
2819 if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
2820 ct7_entry_t local, *cto = &local;
2821 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
/* Recover the 24-bit source ID from the ATIO7 frame header. */
2826 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
2827 if (isp_find_pdb_by_sid(isp, mp->nt_channel, sid, &lp)) {
/* Build a status-only (MODE1, no data) CTIO7 response for the TMF. */
2832 ISP_MEMZERO(&local, sizeof (local));
2833 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2834 cto->ct_header.rqs_entry_count = 1;
2835 cto->ct_nphdl = nphdl;
2836 cto->ct_rxid = aep->at_rxid;
2837 cto->ct_vpidx = mp->nt_channel;
2838 cto->ct_iid_lo = sid;
2839 cto->ct_iid_hi = sid >> 16;
2840 cto->ct_oxid = aep->at_hdr.ox_id;
2841 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
2842 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
2843 return (isp_target_put_entry(isp, &local));
2847 * This case is for a responding to an ABTS frame
2849 if (IS_24XX(isp) && mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
2852 * Overload nt_need_ack here to mark whether we've terminated the associated command.
2854 if (mp->nt_need_ack) {
2855 uint8_t storage[QENTRY_LEN];
2856 ct7_entry_t *cto = (ct7_entry_t *) storage;
2857 abts_t *abts = (abts_t *)mp->nt_lreserved;
/* Terminate the task named by the ABTS before acknowledging it. */
2859 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2860 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
2861 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2862 cto->ct_header.rqs_entry_count = 1;
2863 cto->ct_nphdl = mp->nt_nphdl;
2864 cto->ct_rxid = abts->abts_rxid_task;
2865 cto->ct_iid_lo = mp->nt_sid;
2866 cto->ct_iid_hi = mp->nt_sid >> 16;
2867 cto->ct_oxid = abts->abts_ox_id;
2868 cto->ct_vpidx = mp->nt_channel;
2869 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2870 if (isp_target_put_entry(isp, cto)) {
2873 mp->nt_need_ack = 0;
2875 if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) {
2883 * Handle logout cases here
2885 if (mp->nt_ncode == NT_GLOBAL_LOGOUT) {
2886 isp_del_all_wwn_entries(isp, mp->nt_channel);
2889 if (mp->nt_ncode == NT_LOGOUT) {
2890 if (!IS_2100(isp) && IS_FC(isp)) {
2891 isp_del_wwn_entries(isp, mp);
2896 * General purpose acknowledgement
2898 if (mp->nt_need_ack) {
2899 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
2900 return (isp_notify_ack(isp, mp->nt_lreserved));
2906 * Handle task management functions.
2908 * We show up here with a notify structure filled out.
2910 * The nt_lreserved tag points to the original queue entry
/*
 * isp_handle_platform_target_tmf:
 *	Deliver a task management function (described by an isp_notify_t)
 *	to the CAM layer as an XPT_IMMEDIATE_NOTIFY.  If an INOT CCB is
 *	available for the lun it is filled in and completed with
 *	CAM_MESSAGE_RECV; otherwise the notify (plus its original queue
 *	entry) is saved in a private structure (bad_reentry path, lines
 *	elided here) and, failing that, acked/rejected directly.
 */
2913 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
2917 struct ccb_immediate_notify *inot;
2918 inot_private_data_t *ntp = NULL;
2921 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun 0x%x", __func__, notify->nt_ncode,
2922 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
2924 * NB: This assignment is necessary because of tricky type conversion.
2925 * XXX: This is tricky and I need to check this. If the lun isn't known
2926 * XXX: for the task management function, it does not of necessity follow
2927 * XXX: that it should go up stream to the wildcard listener.
2929 if (notify->nt_lun == LUN_ANY) {
2930 lun = CAM_LUN_WILDCARD;
2932 lun = notify->nt_lun;
/* Find the lun state, falling back to the wildcard listener. */
2934 tptr = get_lun_statep(isp, notify->nt_channel, lun);
2936 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
2938 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun 0x%x", __func__, notify->nt_channel, lun);
2942 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
2944 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun 0x%x", __func__, notify->nt_channel, lun);
/* Map the initiator's S_ID to a CAM initiator id if we know the port. */
2948 if (isp_find_pdb_by_sid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0) {
2949 inot->initiator_id = CAM_TARGET_WILDCARD;
2951 inot->initiator_id = lp->handle;
/* nt_tagval packs seq_id in the low 32 bits and tag_id in the high 32. */
2953 inot->seq_id = notify->nt_tagval;
2954 inot->tag_id = notify->nt_tagval >> 32;
/* Translate the internal TMF code into a CAM message argument. */
2956 switch (notify->nt_ncode) {
2958 isp_target_mark_aborted_early(isp, tptr, inot->tag_id);
2959 inot->arg = MSG_ABORT_TASK;
2961 case NT_ABORT_TASK_SET:
2962 isp_target_mark_aborted_early(isp, tptr, TAG_ANY);
2963 inot->arg = MSG_ABORT_TASK_SET;
2966 inot->arg = MSG_CLEAR_ACA;
2968 case NT_CLEAR_TASK_SET:
2969 inot->arg = MSG_CLEAR_TASK_SET;
2972 inot->arg = MSG_LOGICAL_UNIT_RESET;
2974 case NT_TARGET_RESET:
2975 inot->arg = MSG_TARGET_RESET;
2978 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun 0x%x", __func__, notify->nt_ncode, notify->nt_channel, lun);
/*
 * Stash a copy of the notify (and its original queue entry, if any)
 * so the eventual XPT_NOTIFY_ACKNOWLEDGE can find and ack it.
 */
2982 ntp = isp_get_ntpd(isp, tptr);
2984 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
2987 ISP_MEMCPY(&ntp->rd.nt, notify, sizeof (isp_notify_t));
2988 if (notify->nt_lreserved) {
2989 ISP_MEMCPY(&ntp->rd.data, notify->nt_lreserved, QENTRY_LEN);
2990 ntp->rd.nt.nt_lreserved = &ntp->rd.data;
2992 ntp->rd.seq_id = notify->nt_tagval;
2993 ntp->rd.tag_id = notify->nt_tagval >> 32;
/* Hand the INOT up to CAM. */
2996 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
2997 rls_lun_statep(isp, tptr);
2998 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: Take FREE INOT count now %d\n", __func__, tptr->inot_count);
2999 inot->ccb_h.status = CAM_MESSAGE_RECV;
3000 xpt_done((union ccb *)inot);
3004 rls_lun_statep(isp, tptr);
/* Failure path: ack or NAK the original entry directly. */
3006 if (notify->nt_need_ack && notify->nt_lreserved) {
3007 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
3008 (void) isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM);
3010 (void) isp_notify_ack(isp, notify->nt_lreserved);
3016 * Find the associated private data and mark it as dead so
3017 * we don't try to work on it any further.
/*
 * isp_target_mark_aborted:
 *	Handle an XPT_ABORT-style request for a target-mode CCB: find the
 *	per-command (ATIO) private data for the CCB being aborted and mark
 *	it dead (marking line elided here) so no further work is done on it.
 *	Sets ccb->ccb_h.status to CAM_REQ_INVALID if the lun state or the
 *	tag cannot be found, CAM_REQ_CMP otherwise.
 */
3020 isp_target_mark_aborted(ispsoftc_t *isp, union ccb *ccb)
3023 atio_private_data_t *atp;
3024 union ccb *accb = ccb->cab.abort_ccb;
3026 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
3028 tptr = get_lun_statep(isp, XS_CHANNEL(accb), CAM_LUN_WILDCARD);
3030 ccb->ccb_h.status = CAM_REQ_INVALID;
3035 atp = isp_get_atpd(isp, tptr, accb->atio.tag_id);
3037 ccb->ccb_h.status = CAM_REQ_INVALID;
3040 ccb->ccb_h.status = CAM_REQ_CMP;
3042 rls_lun_statep(isp, tptr);
/*
 * isp_target_mark_aborted_early:
 *	On receipt of an ABORT TASK (tag_id) or ABORT TASK SET (TAG_ANY),
 *	discard matching commands still sitting on the lun's restart queue
 *	and mark matching entries in the ATIO private-data pool as dead
 *	before they are (re)started.
 */
3046 isp_target_mark_aborted_early(ispsoftc_t *isp, tstate_t *tptr, uint32_t tag_id)
3048 atio_private_data_t *atp;
3049 inot_private_data_t *restart_queue = tptr->restart_queue;
3052 * First, clean any commands pending restart
3054 tptr->restart_queue = NULL;
3055 while (restart_queue) {
3056 uint32_t this_tag_id;
3057 inot_private_data_t *ntp = restart_queue;
/* nt_hba is overloaded as the singly-linked restart-queue link. */
3059 restart_queue = ntp->rd.nt.nt_hba;
/* The saved queue entry's tag field depends on the chip generation. */
3062 this_tag_id = ((at7_entry_t *)ntp->rd.data)->at_rxid;
3064 this_tag_id = ((at2_entry_t *)ntp->rd.data)->at_rxid;
/* Matching entries are freed; the rest are pushed back on the queue. */
3066 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
3067 isp_put_ntpd(isp, tptr, ntp);
3069 ntp->rd.nt.nt_hba = tptr->restart_queue;
3070 tptr->restart_queue = ntp;
3075 * Now mark other ones dead as well.
3077 for (atp = tptr->atpool; atp < &tptr->atpool[ATPDPSIZE]; atp++) {
3078 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id) {
/*
 * Optional built-in target-mode test device ("isptarg"): a CAM periph
 * that emulates a simple RAM disk so target mode can be exercised
 * without an external initiator-side consumer.  Everything below is
 * compiled only when ISP_INTERNAL_TARGET is defined.
 */
3085 #ifdef ISP_INTERNAL_TARGET
3086 // #define ISP_FORCE_TIMEOUT 1
3087 // #define ISP_TEST_WWNS 1
3088 // #define ISP_TEST_SEPARATE_STATUS 1
/* Private per-CCB fields hijacked from the CCB header's ppriv area. */
3090 #define ccb_data_offset ppriv_field0
3091 #define ccb_atio ppriv_ptr1
3092 #define ccb_inot ppriv_ptr1
3094 #define MAX_ISP_TARG_TRANSFER (2 << 20)
3095 #define NISP_TARG_CMDS 1024
3096 #define NISP_TARG_NOTIFIES 1024
3097 #define DISK_SHIFT 9
3098 #define JUNK_SIZE 256
3101 #define VERIFY_10 0x2f
3104 TAILQ_HEAD(ccb_queue, ccb_hdr);
3105 extern u_int vm_kmem_size;
/* Backing store for the emulated disk plus a scratch buffer. */
3107 static uint32_t disk_size;
3108 static uint8_t *disk_data = NULL;
3109 static uint8_t *junk_data;
3110 static MALLOC_DEFINE(M_ISPTARG, "ISPTARG", "ISP TARGET data");
3111 struct isptarg_softc {
3112 /* CCBs (CTIOs, ATIOs, INOTs) pending on the controller */
3113 struct ccb_queue work_queue;
3114 struct ccb_queue rework_queue;
3115 struct ccb_queue running_queue;
3116 struct ccb_queue inot_queue;
3117 struct cam_periph *periph;
3118 struct cam_path *path;
/* CAM peripheral-driver entry points for the internal target. */
3121 static periph_ctor_t isptargctor;
3122 static periph_dtor_t isptargdtor;
3123 static periph_start_t isptargstart;
3124 static periph_init_t isptarginit;
3125 static void isptarg_done(struct cam_periph *, union ccb *);
3126 static void isptargasync(void *, u_int32_t, struct cam_path *, void *);
3129 static int isptarg_rwparm(uint8_t *, uint8_t *, uint64_t, uint32_t, uint8_t **, uint32_t *, int *);
3131 static struct periph_driver isptargdriver =
3133 isptarginit, "isptarg", TAILQ_HEAD_INITIALIZER(isptargdriver.units), /* generation */ 0
/*
 * isptargnotify:
 *	Consume an immediate notify by converting the supplied CCB into an
 *	XPT_NOTIFY_ACKNOWLEDGE that echoes the INOT's tag/seq/initiator
 *	identifiers; the original INOT is remembered in ccb_inot so
 *	isptarg_done() can recycle it after the ack completes.
 */
3142 isptargnotify(ispsoftc_t *isp, union ccb *iccb, struct ccb_immediate_notify *inot)
3144 struct ccb_notify_acknowledge *ack = &iccb->cna2;
3146 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "%s: [0x%x] immediate notify for 0x%x from 0x%x status 0x%x arg 0x%x\n", __func__,
3147 inot->tag_id, inot->initiator_id, inot->seq_id, inot->ccb_h.status, inot->arg);
3148 ack->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE;
3149 ack->ccb_h.flags = 0;
3150 ack->ccb_h.retry_count = 0;
3151 ack->ccb_h.cbfcnp = isptarg_done;
3152 ack->ccb_h.timeout = 0;
3153 ack->ccb_h.ccb_inot = inot;
3154 ack->tag_id = inot->tag_id;
3155 ack->seq_id = inot->seq_id;
3156 ack->initiator_id = inot->initiator_id;
/*
 * isptargstart:
 *	Periph start routine for the internal target.  Services, in
 *	priority order: pending immediate notifies, the rework
 *	(continuation) queue, then the work queue of fresh ATIOs.  For
 *	each ATIO it decodes the CDB, emulating a simple RAM disk
 *	(INQUIRY, TEST UNIT READY, READ CAPACITY, REPORT LUNS,
 *	REQUEST SENSE, reads/writes via isptarg_rwparm), and then fills
 *	the supplied CCB in as a CTIO via cam_fill_ctio().
 */
3161 isptargstart(struct cam_periph *periph, union ccb *iccb)
/* Canned INQUIRY data: "null device" for unknown luns, RAM disk for lun 0. */
3163 const uint8_t niliqd[SHORT_INQUIRY_LENGTH] = {
3164 0x7f, 0x0, 0x5, 0x2, 32, 0, 0, 0x32,
3165 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
3166 'S', 'C', 'S', 'I', ' ', 'N', 'U', 'L',
3167 'L', ' ', 'D', 'E', 'V', 'I', 'C', 'E',
3170 const uint8_t iqd[SHORT_INQUIRY_LENGTH] = {
3171 0, 0x0, 0x5, 0x2, 32, 0, 0, 0x32,
3172 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
3173 'S', 'C', 'S', 'I', ' ', 'M', 'E', 'M',
3174 'O', 'R', 'Y', ' ', 'D', 'I', 'S', 'K',
3177 int i, more = 0, last;
3178 struct isptarg_softc *softc = periph->softc;
3179 struct ccb_scsiio *csio;
3180 lun_id_t return_lun;
3181 struct ccb_accept_tio *atio;
3182 uint8_t *cdb, *ptr, status;
3184 uint32_t data_len, flags;
3185 struct ccb_hdr *ccbh;
3187 mtx_assert(periph->sim->mtx, MA_OWNED);
3188 ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, iccb->ccb_h.path, "%s: function code 0x%x INOTQ=%c WORKQ=%c REWORKQ=%c\n", __func__, iccb->ccb_h.func_code,
3189 TAILQ_FIRST(&softc->inot_queue)? 'y' : 'n', TAILQ_FIRST(&softc->work_queue)? 'y' : 'n', TAILQ_FIRST(&softc->rework_queue)? 'y' : 'n')
3191 * Check for immediate notifies first
3193 ccbh = TAILQ_FIRST(&softc->inot_queue);
3195 TAILQ_REMOVE(&softc->inot_queue, ccbh, periph_links.tqe);
/* More work pending? Reschedule ourselves before servicing this INOT. */
3196 if (TAILQ_FIRST(&softc->inot_queue) || TAILQ_FIRST(&softc->work_queue) || TAILQ_FIRST(&softc->rework_queue)) {
3197 xpt_schedule(periph, 1);
3199 isptargnotify(softc->isp, iccb, (struct ccb_immediate_notify *)ccbh);
3204 * Check the rework (continuation) work queue first.
3206 ccbh = TAILQ_FIRST(&softc->rework_queue);
3208 atio = (struct ccb_accept_tio *)ccbh;
3209 TAILQ_REMOVE(&softc->rework_queue, ccbh, periph_links.tqe);
3210 more = TAILQ_FIRST(&softc->work_queue) || TAILQ_FIRST(&softc->rework_queue);
3212 ccbh = TAILQ_FIRST(&softc->work_queue);
3214 ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, iccb->ccb_h.path, "%s: woken up but no work?\n", __func__);
3215 xpt_release_ccb(iccb);
3218 atio = (struct ccb_accept_tio *)ccbh;
3219 TAILQ_REMOVE(&softc->work_queue, ccbh, periph_links.tqe);
3220 more = TAILQ_FIRST(&softc->work_queue) != NULL;
/* Fresh ATIO: transfer starts at offset zero. */
3221 atio->ccb_h.ccb_data_offset = 0;
3224 if (atio->tag_id == 0xffffffff || atio->ccb_h.func_code != XPT_ACCEPT_TARGET_IO) {
/* Default response: GOOD status, send-status CTIO, clean sense. */
3231 status = SCSI_STATUS_OK;
3232 flags = CAM_SEND_STATUS;
3233 memset(&atio->sense_data, 0, sizeof (atio->sense_data));
3234 cdb = atio->cdb_io.cdb_bytes;
3235 ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, ccbh->path, "%s: [0x%x] processing ATIO from 0x%x CDB=0x%x data_offset=%u\n", __func__, atio->tag_id, atio->init_id,
3236 cdb[0], atio->ccb_h.ccb_data_offset);
/* Only lun 0 is a real device; others answer INQUIRY-class CDBs only. */
3238 return_lun = XS_LUN(atio);
3239 if (return_lun != 0) {
3240 xpt_print(atio->ccb_h.path, "[0x%x] Non-Zero Lun %d: cdb0=0x%x\n", atio->tag_id, return_lun, cdb[0]);
3241 if (cdb[0] != INQUIRY && cdb[0] != REPORT_LUNS && cdb[0] != REQUEST_SENSE) {
3242 status = SCSI_STATUS_CHECK_COND;
3243 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_ILLEGAL_REQUEST;
3244 atio->sense_data.add_sense_code = 0x25;
3245 atio->sense_data.add_sense_code_qual = 0x0;
3246 atio->sense_len = sizeof (atio->sense_data);
3248 return_lun = CAM_LUN_WILDCARD;
/* REQUEST SENSE: return NO SENSE data, truncated to the allocation length. */
3253 flags |= CAM_DIR_IN;
3254 data_len = sizeof (atio->sense_data);
3255 junk_data[0] = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_NO_SENSE;
3256 memset(junk_data+1, 0, data_len-1);
3257 if (data_len > cdb[4]) {
3261 data_ptr = junk_data;
/* READ path: isptarg_rwparm maps the CDB onto the RAM-disk buffer. */
3268 if (isptarg_rwparm(cdb, disk_data, disk_size, atio->ccb_h.ccb_data_offset, &data_ptr, &data_len, &last)) {
3269 status = SCSI_STATUS_CHECK_COND;
3270 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3271 atio->sense_data.add_sense_code = 0x5;
3272 atio->sense_data.add_sense_code_qual = 0x24;
3273 atio->sense_len = sizeof (atio->sense_data);
3275 #ifdef ISP_FORCE_TIMEOUT
3280 xpt_schedule(periph, 1);
3287 #ifdef ISP_TEST_SEPARATE_STATUS
3288 if (last && data_len) {
3293 flags &= ~CAM_SEND_STATUS;
3296 atio->ccb_h.ccb_data_offset += data_len;
3297 flags |= CAM_DIR_IN;
3299 flags |= CAM_DIR_NONE;
/* WRITE path: identical shape to the READ path, direction reversed. */
3307 if (isptarg_rwparm(cdb, disk_data, disk_size, atio->ccb_h.ccb_data_offset, &data_ptr, &data_len, &last)) {
3308 status = SCSI_STATUS_CHECK_COND;
3309 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3310 atio->sense_data.add_sense_code = 0x5;
3311 atio->sense_data.add_sense_code_qual = 0x24;
3312 atio->sense_len = sizeof (atio->sense_data);
3314 #ifdef ISP_FORCE_TIMEOUT
3319 xpt_schedule(periph, 1);
3326 #ifdef ISP_TEST_SEPARATE_STATUS
3327 if (last && data_len) {
3332 flags &= ~CAM_SEND_STATUS;
3335 atio->ccb_h.ccb_data_offset += data_len;
3336 flags |= CAM_DIR_OUT;
3338 flags |= CAM_DIR_NONE;
/* INQUIRY: reject nonzero EVPD/page/reserved bytes, else canned data. */
3343 flags |= CAM_DIR_IN;
3344 if (cdb[1] || cdb[2] || cdb[3]) {
3345 status = SCSI_STATUS_CHECK_COND;
3346 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3347 atio->sense_data.add_sense_code = 0x5;
3348 atio->sense_data.add_sense_code_qual = 0x20;
3349 atio->sense_len = sizeof (atio->sense_data);
3352 data_len = sizeof (iqd);
3353 if (data_len > cdb[4]) {
3357 if (XS_LUN(iccb) != 0) {
3358 memcpy(junk_data, niliqd, sizeof (iqd));
3360 memcpy(junk_data, iqd, sizeof (iqd));
3362 data_ptr = junk_data;
3365 case TEST_UNIT_READY:
3366 flags |= CAM_DIR_NONE;
3369 status = SCSI_STATUS_CHECK_COND;
3370 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3371 atio->sense_data.add_sense_code = 0x28;
3372 atio->sense_data.add_sense_code_qual = 0x0;
3373 atio->sense_len = sizeof (atio->sense_data);
3376 case SYNCHRONIZE_CACHE:
3381 flags |= CAM_DIR_NONE;
/* READ CAPACITY: big-endian last-LBA and block size of 1<<DISK_SHIFT. */
3385 flags |= CAM_DIR_IN;
3386 if (cdb[2] || cdb[3] || cdb[4] || cdb[5]) {
3387 status = SCSI_STATUS_CHECK_COND;
3388 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3389 atio->sense_data.add_sense_code = 0x5;
3390 atio->sense_data.add_sense_code_qual = 0x24;
3391 atio->sense_len = sizeof (atio->sense_data);
3394 if (cdb[8] & 0x1) { /* PMI */
3395 junk_data[0] = 0xff;
3396 junk_data[1] = 0xff;
3397 junk_data[2] = 0xff;
3398 junk_data[3] = 0xff;
3400 uint64_t last_blk = (disk_size >> DISK_SHIFT) - 1;
3401 if (last_blk < 0xffffffffULL) {
3402 junk_data[0] = (last_blk >> 24) & 0xff;
3403 junk_data[1] = (last_blk >> 16) & 0xff;
3404 junk_data[2] = (last_blk >> 8) & 0xff;
3405 junk_data[3] = (last_blk) & 0xff;
3407 junk_data[0] = 0xff;
3408 junk_data[1] = 0xff;
3409 junk_data[2] = 0xff;
3410 junk_data[3] = 0xff;
3413 junk_data[4] = ((1 << DISK_SHIFT) >> 24) & 0xff;
3414 junk_data[5] = ((1 << DISK_SHIFT) >> 16) & 0xff;
3415 junk_data[6] = ((1 << DISK_SHIFT) >> 8) & 0xff;
3416 junk_data[7] = ((1 << DISK_SHIFT)) & 0xff;
3417 data_ptr = junk_data;
/* REPORT LUNS: a single 8-byte lun descriptor list. */
3421 flags |= CAM_DIR_IN;
3422 memset(junk_data, 0, JUNK_SIZE);
3423 junk_data[0] = (1 << 3) >> 24;
3424 junk_data[1] = (1 << 3) >> 16;
3425 junk_data[2] = (1 << 3) >> 8;
3426 junk_data[3] = (1 << 3);
3428 for (i = 0; i < 1; i++) {
3429 ptr = &junk_data[8 + (1 << 3)];
3431 ptr[0] = 0x40 | ((i >> 8) & 0x3f);
3435 data_ptr = junk_data;
3436 data_len = (ptr + 8) - junk_data;
/* Unsupported CDB: CHECK CONDITION, invalid command operation code. */
3440 flags |= CAM_DIR_NONE;
3441 status = SCSI_STATUS_CHECK_COND;
3442 atio->sense_data.error_code = SSD_ERRCODE_VALID|SSD_CURRENT_ERROR|SSD_KEY_UNIT_ATTENTION;
3443 atio->sense_data.add_sense_code = 0x5;
3444 atio->sense_data.add_sense_code_qual = 0x20;
3445 atio->sense_len = sizeof (atio->sense_data);
3450 * If we are done with the transaction, tell the
3451 * controller to send status and perform a CMD_CMPLT.
3452 * If we have associated sense data, see if we can
3455 if (status == SCSI_STATUS_CHECK_COND) {
3456 flags |= CAM_SEND_SENSE;
3457 csio->sense_len = atio->sense_len;
3458 csio->sense_data = atio->sense_data;
3459 flags &= ~CAM_DIR_MASK;
/* Turn the CCB into a CTIO carrying the data/status decided above. */
3463 cam_fill_ctio(csio, 0, isptarg_done, flags, MSG_SIMPLE_Q_TAG, atio->tag_id, atio->init_id, status, data_ptr, data_len, 0);
3464 iccb->ccb_h.target_id = atio->ccb_h.target_id;
3465 iccb->ccb_h.target_lun = return_lun;
3466 iccb->ccb_h.ccb_atio = atio;
3469 if ((atio->ccb_h.status & CAM_DEV_QFRZN) != 0) {
3470 cam_release_devq(periph->path, 0, 0, 0, 0);
3471 atio->ccb_h.status &= ~CAM_DEV_QFRZN;
3474 xpt_schedule(periph, 1);
/*
 * isptargctor:
 *	Periph constructor: wire the pre-allocated softc (passed in as
 *	arg by cam_periph_alloc) and the periph together.
 */
3479 isptargctor(struct cam_periph *periph, void *arg)
3481 struct isptarg_softc *softc;
3483 softc = (struct isptarg_softc *)arg;
3484 periph->softc = softc;
3485 softc->periph = periph;
3486 softc->path = periph->path;
3487 ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, periph->path, "%s called\n", __func__);
3488 return (CAM_REQ_CMP);
/*
 * isptargdtor:
 *	Periph destructor: break the periph <-> softc linkage.  The softc
 *	itself is owned and freed elsewhere (see isp_target_thread).
 */
3492 isptargdtor(struct cam_periph *periph)
3494 struct isptarg_softc *softc;
3495 softc = (struct isptarg_softc *)periph->softc;
3496 ISP_PATH_PRT(softc->isp, ISP_LOGTDEBUG0, periph->path, "%s called\n", __func__);
3497 softc->periph = NULL;
3499 periph->softc = NULL;
/*
 * isptarg_done:
 *	Completion callback for all internal-target CCBs.  New ATIOs and
 *	INOTs are queued for isptargstart; completed CTIOs either requeue
 *	the ATIO for continuation (mid-transfer) or recycle it (final);
 *	completed notify-acks recycle their saved INOT.
 */
3503 isptarg_done(struct cam_periph *periph, union ccb *ccb)
3505 struct isptarg_softc *softc;
3507 struct ccb_accept_tio *atio;
3508 struct ccb_immediate_notify *inot;
3511 softc = (struct isptarg_softc *)periph->softc;
3513 status = ccb->ccb_h.status & CAM_STATUS_MASK;
3515 switch (ccb->ccb_h.func_code) {
3516 case XPT_ACCEPT_TARGET_IO:
/* Fresh command from an initiator: queue it and kick the start routine. */
3517 atio = (struct ccb_accept_tio *) ccb;
3518 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] ATIO seen in %s\n", atio->tag_id, __func__);
3519 TAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.tqe);
3520 xpt_schedule(periph, 1);
3522 case XPT_IMMEDIATE_NOTIFY:
3523 inot = (struct ccb_immediate_notify *) ccb;
3524 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] INOT for 0x%x seen in %s\n", inot->tag_id, inot->seq_id, __func__);
3525 TAILQ_INSERT_TAIL(&softc->inot_queue, &ccb->ccb_h, periph_links.tqe);
3526 xpt_schedule(periph, 1);
3528 case XPT_CONT_TARGET_IO:
3529 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
3530 cam_release_devq(ccb->ccb_h.path, 0, 0, 0, 0);
3531 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
/* Recover the originating ATIO stashed in the CTIO's private field. */
3533 atio = ccb->ccb_h.ccb_atio;
3534 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3535 cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
3536 xpt_action((union ccb *)atio);
3537 } else if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
/* Mid-transfer CTIO: more data remains, so requeue the ATIO. */
3538 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] MID CTIO seen in %s\n", atio->tag_id, __func__);
3539 TAILQ_INSERT_TAIL(&softc->rework_queue, &atio->ccb_h, periph_links.tqe);
3540 xpt_schedule(periph, 1);
3542 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "[0x%x] FINAL CTIO seen in %s\n", atio->tag_id, __func__);
3543 xpt_action((union ccb *)atio);
3545 xpt_release_ccb(ccb);
3547 case XPT_NOTIFY_ACKNOWLEDGE:
3548 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
3549 cam_release_devq(ccb->ccb_h.path, 0, 0, 0, 0);
3550 ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
/* Re-arm the INOT that this ack was generated for. */
3552 inot = ccb->ccb_h.ccb_inot;
3553 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, inot->ccb_h.path, "[0x%x] recycle notify for tag 0x%x\n", inot->tag_id, inot->seq_id);
3554 xpt_release_ccb(ccb);
3555 xpt_action((union ccb *)inot);
3558 xpt_print(ccb->ccb_h.path, "unexpected code 0x%x\n", ccb->ccb_h.func_code);
/*
 * isptargasync:
 *	Async callback registered for AC_CONTRACT events; logs device
 *	arrival/departure (WWPN and Port ID) and ignores everything else.
 */
3564 isptargasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
3566 struct ac_contract *acp = arg;
3567 struct ac_device_changed *fc = (struct ac_device_changed *) acp->contract_data;
3569 if (code != AC_CONTRACT) {
3572 xpt_print(path, "0x%016llx Port ID 0x%06x %s\n", (unsigned long long) fc->wwpn, fc->port, fc->arrived? "arrived" : "departed");
/*
 * isp_target_thread:
 *	Kernel thread that brings up the internal target device on one
 *	channel: allocates the RAM disk and scratch buffer, creates the
 *	wildcard and lun-0 paths and periphs, registers for AC_CONTRACT
 *	async events, enables the luns, primes the SIM with pools of
 *	ATIO and INOT CCBs, switches the channel role to target, then
 *	sleeps until the channel is torn down and cleans everything up.
 */
3576 isp_target_thread(ispsoftc_t *isp, int chan)
3578 union ccb *ccb = NULL;
3582 struct isptarg_softc *softc = NULL;
3583 struct cam_periph *periph = NULL, *wperiph = NULL;
3584 struct cam_path *path, *wpath;
3585 struct cam_sim *sim;
/* Size the RAM disk off kmem, with a 50 MiB floor. */
3587 if (disk_data == NULL) {
3588 disk_size = roundup2(vm_kmem_size >> 1, (1ULL << 20));
3589 if (disk_size < (50 << 20)) {
3590 disk_size = 50 << 20;
3592 disk_data = malloc(disk_size, M_ISPTARG, M_WAITOK | M_ZERO);
3593 if (disk_data == NULL) {
3594 isp_prt(isp, ISP_LOGERR, "%s: could not allocate disk data", __func__);
3597 isp_prt(isp, ISP_LOGINFO, "allocated a %ju MiB disk", (uintmax_t) (disk_size >> 20));
3599 junk_data = malloc(JUNK_SIZE, M_ISPTARG, M_WAITOK | M_ZERO);
3600 if (junk_data == NULL) {
3601 isp_prt(isp, ISP_LOGERR, "%s: could not allocate junk", __func__);
3606 softc = malloc(sizeof (*softc), M_ISPTARG, M_WAITOK | M_ZERO);
3607 if (softc == NULL) {
3608 isp_prt(isp, ISP_LOGERR, "%s: could not allocate softc", __func__);
3611 TAILQ_INIT(&softc->work_queue);
3612 TAILQ_INIT(&softc->rework_queue);
3613 TAILQ_INIT(&softc->running_queue);
3614 TAILQ_INIT(&softc->inot_queue);
/* Build the wildcard and lun-0 paths on this channel's SIM. */
3617 periphdriver_register(&isptargdriver);
3618 ISP_GET_PC(isp, chan, sim, sim);
3619 ISP_GET_PC(isp, chan, path, path);
3620 status = xpt_create_path_unlocked(&wpath, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
3621 if (status != CAM_REQ_CMP) {
3622 isp_prt(isp, ISP_LOGERR, "%s: could not allocate wildcard path", __func__);
3625 status = xpt_create_path_unlocked(&path, NULL, cam_sim_path(sim), 0, 0);
3626 if (status != CAM_REQ_CMP) {
3627 xpt_free_path(wpath);
3628 isp_prt(isp, ISP_LOGERR, "%s: could not allocate path", __func__);
3632 ccb = xpt_alloc_ccb();
/* Allocate the two periphs (wildcard and lun 0) sharing one softc. */
3635 status = cam_periph_alloc(isptargctor, NULL, isptargdtor, isptargstart, "isptarg", CAM_PERIPH_BIO, wpath, NULL, 0, softc);
3636 if (status != CAM_REQ_CMP) {
3638 isp_prt(isp, ISP_LOGERR, "%s: cam_periph_alloc for wildcard failed", __func__);
3641 wperiph = cam_periph_find(wpath, "isptarg");
3642 if (wperiph == NULL) {
3644 isp_prt(isp, ISP_LOGERR, "%s: wildcard periph already allocated but doesn't exist", __func__);
3648 status = cam_periph_alloc(isptargctor, NULL, isptargdtor, isptargstart, "isptarg", CAM_PERIPH_BIO, path, NULL, 0, softc);
3649 if (status != CAM_REQ_CMP) {
3651 isp_prt(isp, ISP_LOGERR, "%s: cam_periph_alloc failed", __func__);
3655 periph = cam_periph_find(path, "isptarg");
3656 if (periph == NULL) {
3658 isp_prt(isp, ISP_LOGERR, "%s: periph already allocated but doesn't exist", __func__);
3662 status = xpt_register_async(AC_CONTRACT, isptargasync, isp, wpath);
3663 if (status != CAM_REQ_CMP) {
3665 isp_prt(isp, ISP_LOGERR, "%s: xpt_register_async failed", __func__);
3671 ccb = xpt_alloc_ccb();
3674 * Make sure role is none.
3676 xpt_setup_ccb(&ccb->ccb_h, periph->path, 10);
3677 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
3678 ccb->knob.xport_specific.fc.role = KNOB_ROLE_NONE;
3679 #ifdef ISP_TEST_WWNS
3680 ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE | KNOB_VALID_ADDRESS;
3681 ccb->knob.xport_specific.fc.wwnn = 0x508004d000000000ULL | (device_get_unit(isp->isp_osinfo.dev) << 8) | (chan << 16);
3682 ccb->knob.xport_specific.fc.wwpn = 0x508004d000000001ULL | (device_get_unit(isp->isp_osinfo.dev) << 8) | (chan << 16);
3684 ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE;
/* Enable lun 0 and the wildcard lun. */
3694 xpt_setup_ccb(&ccb->ccb_h, periph->path, 10);
3695 ccb->ccb_h.func_code = XPT_EN_LUN;
3696 ccb->cel.enable = 1;
3700 if (ccb->ccb_h.status != CAM_REQ_CMP) {
3702 xpt_print(periph->path, "failed to enable lun (0x%x)\n", ccb->ccb_h.status);
3706 xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 10);
3707 ccb->ccb_h.func_code = XPT_EN_LUN;
3708 ccb->cel.enable = 1;
3712 if (ccb->ccb_h.status != CAM_REQ_CMP) {
3714 xpt_print(wperiph->path, "failed to enable lun (0x%x)\n", ccb->ccb_h.status);
/* Prime the SIM with pools of ATIO and INOT CCBs on both paths. */
3722 ISP_GET_PC_ADDR(isp, chan, target_proc, wchan);
3723 for (i = 0; i < 4; i++) {
3724 ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO);
3725 xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 1);
3726 ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
3727 ccb->ccb_h.cbfcnp = isptarg_done;
3732 for (i = 0; i < NISP_TARG_CMDS; i++) {
3733 ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO);
3734 xpt_setup_ccb(&ccb->ccb_h, periph->path, 1);
3735 ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
3736 ccb->ccb_h.cbfcnp = isptarg_done;
3741 for (i = 0; i < 4; i++) {
3742 ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO);
3743 xpt_setup_ccb(&ccb->ccb_h, wperiph->path, 1);
3744 ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
3745 ccb->ccb_h.cbfcnp = isptarg_done;
3750 for (i = 0; i < NISP_TARG_NOTIFIES; i++) {
3751 ccb = malloc(sizeof (*ccb), M_ISPTARG, M_WAITOK | M_ZERO);
3752 xpt_setup_ccb(&ccb->ccb_h, periph->path, 1);
3753 ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
3754 ccb->ccb_h.cbfcnp = isptarg_done;
3761 * Now turn it all back on
3763 xpt_setup_ccb(&ccb->ccb_h, periph->path, 10);
3764 ccb->ccb_h.func_code = XPT_SET_SIM_KNOB;
3765 ccb->knob.xport_specific.fc.valid = KNOB_VALID_ROLE;
3766 ccb->knob.xport_specific.fc.role = KNOB_ROLE_TARGET;
3772 * Okay, while things are still active, sleep...
3776 ISP_GET_PC(isp, chan, proc_active, i);
3780 msleep(wchan, &isp->isp_lock, PUSER, "tsnooze", 0);
/* Teardown: invalidate periphs, free buffers, softc and paths. */
3786 cam_periph_invalidate(wperiph);
3789 cam_periph_invalidate(periph);
3792 free(junk_data, M_ISPTARG);
3795 free(disk_data, M_ISPTARG);
3798 free(softc, M_ISPTARG);
3800 xpt_free_path(path);
3801 xpt_free_path(wpath);
3805 isp_target_thread_pi(void *arg)
3807 struct isp_spi *pi = arg;
3808 isp_target_thread(cam_sim_softc(pi->sim), cam_sim_bus(pi->sim));
3812 isp_target_thread_fc(void *arg)
3814 struct isp_fc *fc = arg;
3815 isp_target_thread(cam_sim_softc(fc->sim), cam_sim_bus(fc->sim));
3819 isptarg_rwparm(uint8_t *cdb, uint8_t *dp, uint64_t dl, uint32_t offset, uint8_t **kp, uint32_t *tl, int *lp)
3821 uint32_t cnt, curcnt;
3827 cnt = (((uint32_t)cdb[10]) << 24) |
3828 (((uint32_t)cdb[11]) << 16) |
3829 (((uint32_t)cdb[12]) << 8) |
3830 ((uint32_t)cdb[13]);
3832 lba = (((uint64_t)cdb[2]) << 56) |
3833 (((uint64_t)cdb[3]) << 48) |
3834 (((uint64_t)cdb[4]) << 40) |
3835 (((uint64_t)cdb[5]) << 32) |
3836 (((uint64_t)cdb[6]) << 24) |
3837 (((uint64_t)cdb[7]) << 16) |
3838 (((uint64_t)cdb[8]) << 8) |
3843 cnt = (((uint32_t)cdb[6]) << 16) |
3844 (((uint32_t)cdb[7]) << 8) |
3845 ((u_int32_t)cdb[8]);
3847 lba = (((uint32_t)cdb[2]) << 24) |
3848 (((uint32_t)cdb[3]) << 16) |
3849 (((uint32_t)cdb[4]) << 8) |
3854 cnt = (((uint32_t)cdb[7]) << 8) |
3855 ((u_int32_t)cdb[8]);
3857 lba = (((uint32_t)cdb[2]) << 24) |
3858 (((uint32_t)cdb[3]) << 16) |
3859 (((uint32_t)cdb[4]) << 8) |
3868 lba = (((uint32_t)cdb[1] & 0x1f) << 16) |
3869 (((uint32_t)cdb[2]) << 8) |
3879 if (offset == cnt) {
3884 if (lba + cnt > dl) {
3889 curcnt = MAX_ISP_TARG_TRANSFER;
3890 if (offset + curcnt >= cnt) {
3891 curcnt = cnt - offset;
3897 *kp = &dp[lba + offset];
3905 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg)
3907 struct cam_sim *sim;
3911 sim = (struct cam_sim *)cbarg;
3912 isp = (ispsoftc_t *) cam_sim_softc(sim);
3913 bus = cam_sim_bus(sim);
3914 tgt = xpt_path_target_id(path);
3917 case AC_LOST_DEVICE:
3919 uint16_t oflags, nflags;
3920 sdparam *sdp = SDPARAM(isp, bus);
3923 nflags = sdp->isp_devparam[tgt].nvrm_flags;
3924 #ifndef ISP_TARGET_MODE
3925 nflags &= DPARM_SAFE_DFLT;
3926 if (isp->isp_loaded_fw) {
3927 nflags |= DPARM_NARROW | DPARM_ASYNC;
3930 nflags = DPARM_DEFAULT;
3932 oflags = sdp->isp_devparam[tgt].goal_flags;
3933 sdp->isp_devparam[tgt].goal_flags = nflags;
3934 sdp->isp_devparam[tgt].dev_update = 1;
3936 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus);
3937 sdp->isp_devparam[tgt].goal_flags = oflags;
3942 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
3948 isp_poll(struct cam_sim *sim)
3950 ispsoftc_t *isp = cam_sim_softc(sim);
3952 uint16_t sema, mbox;
3954 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
3955 isp_intr(isp, isr, sema, mbox);
3961 isp_watchdog(void *arg)
3963 struct ccb_scsiio *xs = arg;
3965 uint32_t ohandle = ISP_HANDLE_FREE, handle;
3969 handle = isp_find_handle(isp, xs);
3971 if (handle != ISP_HANDLE_FREE && !XS_CMD_WPEND_P(xs)) {
3972 isp_xs_prt(isp, xs, ISP_LOGWARN, "first watchdog (handle 0x%x) timed out- deferring for grace period", handle);
3973 callout_reset(&PISP_PCMD(xs)->wdog, 2 * hz, isp_watchdog, xs);
3980 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
3982 if (handle != ISP_HANDLE_FREE) {
3984 uint16_t sema, mbox;
3985 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) != 0) {
3986 isp_intr(isp, isr, sema, mbox);
3989 handle = isp_find_handle(isp, xs);
3991 if (handle != ISP_HANDLE_FREE) {
3993 * Try and make sure the command is really dead before
3994 * we release the handle (and DMA resources) for reuse.
3996 * If we are successful in aborting the command then
3997 * we're done here because we'll get the command returned
4000 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
4005 * Note that after calling the above, the command may in
4006 * fact have been completed.
4008 xs = isp_find_xs(isp, handle);
4011 * If the command no longer exists, then we won't
4012 * be able to find the xs again with this handle.
4019 * After this point, the command is really dead.
4021 if (XS_XFRLEN(xs)) {
4022 ISP_DMAFREE(isp, xs, handle);
4024 isp_destroy_handle(isp, handle);
4025 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
4026 XS_SETERR(xs, CAM_CMD_TIMEOUT);
4027 isp_prt_endcmd(isp, xs);
4030 if (ohandle != ISP_HANDLE_FREE) {
4031 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
4033 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
4039 isp_make_here(ispsoftc_t *isp, int chan, int tgt)
4042 struct isp_fc *fc = ISP_FC_PC(isp, chan);
4044 if (isp_autoconfig == 0) {
4049 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
4051 ccb = xpt_alloc_ccb_nowait();
4053 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
4056 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
4057 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
4065 isp_make_gone(ispsoftc_t *isp, int chan, int tgt)
4067 struct cam_path *tp;
4068 struct isp_fc *fc = ISP_FC_PC(isp, chan);
4070 if (isp_autoconfig == 0) {
4073 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
4074 xpt_async(AC_LOST_DEVICE, tp, NULL);
4080 * Gone Device Timer Function- when we have decided that a device has gone
4081 * away, we wait a specific period of time prior to telling the OS it has
4084 * This timer function fires once a second and then scans the port database
4085 * for devices that are marked dead but still have a virtual target assigned.
4086 * We decrement a counter for that port database entry, and when it hits zero,
4087 * we tell the OS the device has gone away.
4092 struct isp_fc *fc = arg;
4093 taskqueue_enqueue(taskqueue_thread, &fc->gtask);
4097 isp_gdt_task(void *arg, int pending)
4099 struct isp_fc *fc = arg;
4100 ispsoftc_t *isp = fc->isp;
4101 int chan = fc - isp->isp_osinfo.pc.fc;
4103 int dbidx, tgt, more_to_do = 0;
4106 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
4107 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
4108 lp = &FCPARAM(isp, chan)->portdb[dbidx];
4110 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
4113 if (lp->dev_map_idx == 0 || lp->target_mode) {
4116 if (lp->gone_timer != 0) {
4117 isp_prt(isp, ISP_LOGSANCFG, "%s: Chan %d more to do for target %u (timer=%u)", __func__, chan, lp->dev_map_idx - 1, lp->gone_timer);
4118 lp->gone_timer -= 1;
4122 tgt = lp->dev_map_idx - 1;
4123 FCPARAM(isp, chan)->isp_dev_map[tgt] = 0;
4124 lp->dev_map_idx = 0;
4125 lp->state = FC_PORTDB_STATE_NIL;
4126 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, lp->portid, tgt, "Gone Device Timeout");
4127 isp_make_gone(isp, chan, tgt);
4131 callout_reset(&fc->gdt, hz, isp_gdt, fc);
4133 callout_deactivate(&fc->gdt);
4134 isp_prt(isp, ISP_LOGSANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
4141 * Loop Down Timer Function- when loop goes down, a timer is started and
4142 * and after it expires we come here and take all probational devices that
4143 * the OS knows about and the tell the OS that they've gone away.
4145 * We don't clear the devices out of our port database because, when loop
4146 * come back up, we have to do some actual cleanup with the chip at that
4147 * point (implicit PLOGO, e.g., to get the chip's port database state right).
4152 struct isp_fc *fc = arg;
4153 taskqueue_enqueue(taskqueue_thread, &fc->ltask);
4157 isp_ldt_task(void *arg, int pending)
4159 struct isp_fc *fc = arg;
4160 ispsoftc_t *isp = fc->isp;
4161 int chan = fc - isp->isp_osinfo.pc.fc;
4166 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d Loop Down Timer expired @ %lu", chan, (unsigned long) time_uptime);
4167 callout_deactivate(&fc->ldt);
4170 * Notify to the OS all targets who we now consider have departed.
4172 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
4173 lp = &FCPARAM(isp, chan)->portdb[dbidx];
4175 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
4178 if (lp->dev_map_idx == 0 || lp->target_mode) {
4183 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
4187 for (i = 0; i < isp->isp_maxcmds; i++) {
4188 struct ccb_scsiio *xs;
4190 if (!ISP_VALID_HANDLE(isp, isp->isp_xflist[i].handle)) {
4193 if ((xs = isp->isp_xflist[i].cmd) == NULL) {
4196 if (dbidx != (FCPARAM(isp, chan)->isp_dev_map[XS_TGT(xs)] - 1)) {
4199 isp_prt(isp, ISP_LOGWARN, "command handle 0x%08x for %d.%d.%d orphaned by loop down timeout",
4200 isp->isp_xflist[i].handle, chan, XS_TGT(xs), XS_LUN(xs));
4204 * Mark that we've announced that this device is gone....
4209 * but *don't* change the state of the entry. Just clear
4210 * any target id stuff and announce to CAM that the
4211 * device is gone. This way any necessary PLOGO stuff
4212 * will happen when loop comes back up.
4215 tgt = lp->dev_map_idx - 1;
4216 FCPARAM(isp, chan)->isp_dev_map[tgt] = 0;
4217 lp->dev_map_idx = 0;
4218 lp->state = FC_PORTDB_STATE_NIL;
4219 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, lp->portid, tgt, "Loop Down Timeout");
4220 isp_make_gone(isp, chan, tgt);
4223 if (FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR) {
4224 isp_unfreeze_loopdown(isp, chan);
4227 * The loop down timer has expired. Wake up the kthread
4228 * to notice that fact (or make it false).
4231 fc->loop_down_time = fc->loop_down_limit+1;
4237 isp_kthread(void *arg)
4239 struct isp_fc *fc = arg;
4240 ispsoftc_t *isp = fc->isp;
4241 int chan = fc - isp->isp_osinfo.pc.fc;
4244 mtx_lock(&isp->isp_osinfo.lock);
4249 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d checking FC state", __func__, chan);
4250 lb = isp_fc_runstate(isp, chan, 250000);
4253 * Our action is different based upon whether we're supporting
4254 * Initiator mode or not. If we are, we might freeze the simq
4255 * when loop is down and set all sorts of different delays to
4258 * If not, we simply just wait for loop to come up.
4260 if (lb && (FCPARAM(isp, chan)->role & ISP_ROLE_INITIATOR)) {
4262 * Increment loop down time by the last sleep interval
4264 fc->loop_down_time += slp;
4267 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC loop not up (down count %d)", __func__, chan, fc->loop_down_time);
4269 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC got to %d (down count %d)", __func__, chan, lb, fc->loop_down_time);
4273 * If we've never seen loop up and we've waited longer
4274 * than quickboot time, or we've seen loop up but we've
4275 * waited longer than loop_down_limit, give up and go
4276 * to sleep until loop comes up.
4278 if (FCPARAM(isp, chan)->loop_seen_once == 0) {
4279 lim = isp_quickboot_time;
4281 lim = fc->loop_down_limit;
4283 if (fc->loop_down_time >= lim) {
4284 isp_freeze_loopdown(isp, chan, "loop limit hit");
4286 } else if (fc->loop_down_time < 10) {
4288 } else if (fc->loop_down_time < 30) {
4290 } else if (fc->loop_down_time < 60) {
4292 } else if (fc->loop_down_time < 120) {
4299 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC Loop Down", __func__, chan);
4300 fc->loop_down_time += slp;
4303 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d FC state OK", __func__, chan);
4304 fc->loop_down_time = 0;
4310 * If this is past the first loop up or the loop is dead and if we'd frozen the simq, unfreeze it
4311 * now so that CAM can start sending us commands.
4313 * If the FC state isn't okay yet, they'll hit that in isp_start which will freeze the queue again
4314 * or kill the commands, as appropriate.
4317 if (FCPARAM(isp, chan)->loop_seen_once || fc->loop_dead) {
4318 isp_unfreeze_loopdown(isp, chan);
4321 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep time %d", __func__, chan, slp);
4323 msleep(fc, &isp->isp_osinfo.lock, PRIBIO, "ispf", slp * hz);
4326 * If slp is zero, we're waking up for the first time after
4327 * things have been okay. In this case, we set a deferral state
4328 * for all commands and delay hysteresis seconds before starting
4329 * the FC state evaluation. This gives the loop/fabric a chance
4332 if (slp == 0 && fc->hysteresis) {
4333 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "%s: Chan %d sleep hysteresis ticks %d", __func__, chan, fc->hysteresis * hz);
4334 mtx_unlock(&isp->isp_osinfo.lock);
4335 pause("ispt", fc->hysteresis * hz);
4336 mtx_lock(&isp->isp_osinfo.lock);
4339 mtx_unlock(&isp->isp_osinfo.lock);
4343 isp_action(struct cam_sim *sim, union ccb *ccb)
4345 int bus, tgt, ts, error, lim;
4347 struct ccb_trans_settings *cts;
4349 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
4351 isp = (ispsoftc_t *)cam_sim_softc(sim);
4352 mtx_assert(&isp->isp_lock, MA_OWNED);
4354 if (isp->isp_state != ISP_RUNSTATE && ccb->ccb_h.func_code == XPT_SCSI_IO) {
4356 if (isp->isp_state != ISP_INITSTATE) {
4358 * Lie. Say it was a selection timeout.
4360 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
4361 xpt_freeze_devq(ccb->ccb_h.path, 1);
4365 isp->isp_state = ISP_RUNSTATE;
4367 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
4368 ISP_PCMD(ccb) = NULL;
4370 switch (ccb->ccb_h.func_code) {
4371 case XPT_SCSI_IO: /* Execute the requested I/O operation */
4372 bus = XS_CHANNEL(ccb);
4374 * Do a couple of preliminary checks...
4376 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
4377 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
4378 ccb->ccb_h.status = CAM_REQ_INVALID;
4384 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
4385 xpt_print(ccb->ccb_h.path, "invalid target\n");
4386 ccb->ccb_h.status = CAM_PATH_INVALID;
4387 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
4388 xpt_print(ccb->ccb_h.path, "invalid lun\n");
4389 ccb->ccb_h.status = CAM_PATH_INVALID;
4391 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
4396 ccb->csio.scsi_status = SCSI_STATUS_OK;
4397 if (isp_get_pcmd(isp, ccb)) {
4398 isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
4399 cam_freeze_devq(ccb->ccb_h.path);
4400 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
4404 error = isp_start((XS_T *) ccb);
4407 XS_CMD_S_CLEAR(ccb);
4408 ccb->ccb_h.status |= CAM_SIM_QUEUED;
4409 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) {
4412 ts = ccb->ccb_h.timeout;
4413 if (ts == CAM_TIME_DEFAULT) {
4416 ts = isp_mstohz(ts);
4418 callout_reset(&PISP_PCMD(ccb)->wdog, ts, isp_watchdog, ccb);
4422 * We get this result for FC devices if the loop state isn't ready yet
4423 * or if the device in question has gone zombie on us.
4425 * If we've never seen Loop UP at all, we requeue this request and wait
4426 * for the initial loop up delay to expire.
4428 lim = ISP_FC_PC(isp, bus)->loop_down_limit;
4429 if (FCPARAM(isp, bus)->loop_seen_once == 0 || ISP_FC_PC(isp, bus)->loop_down_time >= lim) {
4430 if (FCPARAM(isp, bus)->loop_seen_once == 0) {
4431 isp_prt(isp, ISP_LOGDEBUG0, "%d.%d loop not seen yet @ %lu", XS_TGT(ccb), XS_LUN(ccb), (unsigned long) time_uptime);
4433 isp_prt(isp, ISP_LOGDEBUG0, "%d.%d downtime (%d) > lim (%d)", XS_TGT(ccb), XS_LUN(ccb), ISP_FC_PC(isp, bus)->loop_down_time, lim);
4435 ccb->ccb_h.status = CAM_SEL_TIMEOUT|CAM_DEV_QFRZN;
4436 xpt_freeze_devq(ccb->ccb_h.path, 1);
4437 isp_free_pcmd(isp, ccb);
4441 isp_prt(isp, ISP_LOGDEBUG0, "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb));
4442 cam_freeze_devq(ccb->ccb_h.path);
4443 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
4444 XS_SETERR(ccb, CAM_REQUEUE_REQ);
4445 isp_free_pcmd(isp, ccb);
4449 isp_free_pcmd(isp, ccb);
4450 cam_freeze_devq(ccb->ccb_h.path);
4451 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0);
4452 XS_SETERR(ccb, CAM_REQUEUE_REQ);
4456 isp_done((struct ccb_scsiio *) ccb);
4459 isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
4460 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
4461 isp_free_pcmd(isp, ccb);
4466 #ifdef ISP_TARGET_MODE
4467 case XPT_EN_LUN: /* Enable/Disable LUN as a target */
4468 if (ccb->cel.enable) {
4469 isp_enable_lun(isp, ccb);
4471 isp_disable_lun(isp, ccb);
4474 case XPT_IMMED_NOTIFY:
4475 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
4476 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
4478 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
4480 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), CAM_LUN_WILDCARD);
4486 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
4487 str = "XPT_IMMEDIATE_NOTIFY";
4488 tag = ccb->cin1.seq_id;
4490 tag = ccb->atio.tag_id;
4491 str = "XPT_ACCEPT_TARGET_IO";
4493 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] no state pointer found for %s\n", __func__, tag, str);
4494 dump_tstates(isp, XS_CHANNEL(ccb));
4495 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
4498 ccb->ccb_h.sim_priv.entries[0].field = 0;
4499 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
4500 ccb->ccb_h.flags = 0;
4502 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4503 if (ccb->atio.tag_id) {
4504 atio_private_data_t *atp = isp_get_atpd(isp, tptr, ccb->atio.tag_id);
4506 isp_put_atpd(isp, tptr, atp);
4510 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
4511 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE ATIO (tag id 0x%x), count now %d\n",
4512 ccb->atio.tag_id, tptr->atio_count);
4513 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
4514 if (ccb->cin1.tag_id) {
4515 inot_private_data_t *ntp = isp_find_ntpd(isp, tptr, ccb->cin1.tag_id, ccb->cin1.seq_id);
4517 isp_put_ntpd(isp, tptr, ntp);
4521 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
4522 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n",
4523 ccb->cin1.seq_id, tptr->inot_count);
4524 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4526 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
4527 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "Put FREE INOT, (seq id 0x%x) count now %d\n",
4528 ccb->cin1.seq_id, tptr->inot_count);
4530 rls_lun_statep(isp, tptr);
4531 ccb->ccb_h.status = CAM_REQ_INPROG;
4534 case XPT_NOTIFY_ACK:
4535 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
4537 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
4540 inot_private_data_t *ntp;
4543 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
4544 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
4547 * All the relevant path information is in the associated immediate notify
4549 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
4550 ntp = get_ntp_from_tagdata(isp, ccb->cna2.tag_id, ccb->cna2.seq_id, &tptr);
4552 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
4553 ccb->cna2.tag_id, ccb->cna2.seq_id);
4554 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
4558 if (isp_handle_platform_target_notify_ack(isp, &ntp->rd.nt)) {
4559 rls_lun_statep(isp, tptr);
4560 cam_freeze_devq(ccb->ccb_h.path);
4561 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
4562 XS_SETERR(ccb, CAM_REQUEUE_REQ);
4565 isp_put_ntpd(isp, tptr, ntp);
4566 rls_lun_statep(isp, tptr);
4567 ccb->ccb_h.status = CAM_REQ_CMP;
4568 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
4572 case XPT_CONT_TARGET_IO:
4573 isp_target_start_ctio(isp, ccb);
4576 case XPT_RESET_DEV: /* BDR the specified SCSI device */
4578 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
4579 tgt = ccb->ccb_h.target_id;
4582 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
4584 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
4586 ccb->ccb_h.status = CAM_REQ_CMP;
4590 case XPT_ABORT: /* Abort the specified CCB */
4592 union ccb *accb = ccb->cab.abort_ccb;
4593 switch (accb->ccb_h.func_code) {
4594 #ifdef ISP_TARGET_MODE
4595 case XPT_ACCEPT_TARGET_IO:
4596 isp_target_mark_aborted(isp, ccb);
4600 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
4602 ccb->ccb_h.status = CAM_UA_ABORT;
4604 ccb->ccb_h.status = CAM_REQ_CMP;
4608 ccb->ccb_h.status = CAM_REQ_INVALID;
4612 * This is not a queued CCB, so the caller expects it to be
4613 * complete when control is returned.
4617 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
4618 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
4620 if (!IS_CURRENT_SETTINGS(cts)) {
4621 ccb->ccb_h.status = CAM_REQ_INVALID;
4625 tgt = cts->ccb_h.target_id;
4626 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
4628 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
4629 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
4630 sdparam *sdp = SDPARAM(isp, bus);
4633 if (spi->valid == 0 && scsi->valid == 0) {
4634 ccb->ccb_h.status = CAM_REQ_CMP;
4640 * We always update (internally) from goal_flags
4641 * so any request to change settings just gets
4642 * vectored to that location.
4644 dptr = &sdp->isp_devparam[tgt].goal_flags;
4646 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
4647 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
4648 *dptr |= DPARM_DISC;
4650 *dptr &= ~DPARM_DISC;
4653 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
4654 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
4655 *dptr |= DPARM_TQING;
4657 *dptr &= ~DPARM_TQING;
4660 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
4661 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
4662 *dptr |= DPARM_WIDE;
4664 *dptr &= ~DPARM_WIDE;
4670 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && (spi->valid & CTS_SPI_VALID_SYNC_RATE) && (spi->sync_period && spi->sync_offset)) {
4671 *dptr |= DPARM_SYNC;
4673 * XXX: CHECK FOR LEGALITY
4675 sdp->isp_devparam[tgt].goal_period = spi->sync_period;
4676 sdp->isp_devparam[tgt].goal_offset = spi->sync_offset;
4678 *dptr &= ~DPARM_SYNC;
4680 isp_prt(isp, ISP_LOGDEBUG0, "SET (%d.%d.%d) to flags %x off %x per %x", bus, tgt, cts->ccb_h.target_lun, sdp->isp_devparam[tgt].goal_flags,
4681 sdp->isp_devparam[tgt].goal_offset, sdp->isp_devparam[tgt].goal_period);
4682 sdp->isp_devparam[tgt].dev_update = 1;
4685 ccb->ccb_h.status = CAM_REQ_CMP;
4688 case XPT_GET_TRAN_SETTINGS:
4690 tgt = cts->ccb_h.target_id;
4691 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
4693 fcparam *fcp = FCPARAM(isp, bus);
4694 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
4695 struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc;
4696 unsigned int hdlidx;
4698 cts->protocol = PROTO_SCSI;
4699 cts->protocol_version = SCSI_REV_2;
4700 cts->transport = XPORT_FC;
4701 cts->transport_version = 0;
4703 scsi->valid = CTS_SCSI_VALID_TQ;
4704 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
4705 fc->valid = CTS_FC_VALID_SPEED;
4706 fc->bitrate = 100000;
4707 fc->bitrate *= fcp->isp_gbspeed;
4708 hdlidx = fcp->isp_dev_map[tgt] - 1;
4709 if (hdlidx < MAX_FC_TARG) {
4710 fcportdb_t *lp = &fcp->portdb[hdlidx];
4711 fc->wwnn = lp->node_wwn;
4712 fc->wwpn = lp->port_wwn;
4713 fc->port = lp->portid;
4714 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
4717 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
4718 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
4719 sdparam *sdp = SDPARAM(isp, bus);
4720 uint16_t dval, pval, oval;
4722 if (IS_CURRENT_SETTINGS(cts)) {
4723 sdp->isp_devparam[tgt].dev_refresh = 1;
4725 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS, bus);
4726 dval = sdp->isp_devparam[tgt].actv_flags;
4727 oval = sdp->isp_devparam[tgt].actv_offset;
4728 pval = sdp->isp_devparam[tgt].actv_period;
4730 dval = sdp->isp_devparam[tgt].nvrm_flags;
4731 oval = sdp->isp_devparam[tgt].nvrm_offset;
4732 pval = sdp->isp_devparam[tgt].nvrm_period;
4735 cts->protocol = PROTO_SCSI;
4736 cts->protocol_version = SCSI_REV_2;
4737 cts->transport = XPORT_SPI;
4738 cts->transport_version = 2;
4744 if (dval & DPARM_DISC) {
4745 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
4747 if ((dval & DPARM_SYNC) && oval && pval) {
4748 spi->sync_offset = oval;
4749 spi->sync_period = pval;
4751 spi->sync_offset = 0;
4752 spi->sync_period = 0;
4754 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
4755 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
4756 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
4757 if (dval & DPARM_WIDE) {
4758 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
4760 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
4762 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
4763 scsi->valid = CTS_SCSI_VALID_TQ;
4764 if (dval & DPARM_TQING) {
4765 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
4767 spi->valid |= CTS_SPI_VALID_DISC;
4769 isp_prt(isp, ISP_LOGDEBUG0, "GET %s (%d.%d.%d) to flags %x off %x per %x", IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
4770 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval);
4772 ccb->ccb_h.status = CAM_REQ_CMP;
4776 case XPT_CALC_GEOMETRY:
4777 cam_calc_geometry(&ccb->ccg, 1);
4781 case XPT_RESET_BUS: /* Reset the specified bus */
4782 bus = cam_sim_bus(sim);
4783 error = isp_control(isp, ISPCTL_RESET_BUS, bus);
4785 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
4790 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
4793 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
4795 xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, 0);
4797 ccb->ccb_h.status = CAM_REQ_CMP;
4801 case XPT_TERM_IO: /* Terminate the I/O process */
4802 ccb->ccb_h.status = CAM_REQ_INVALID;
4806 case XPT_SET_SIM_KNOB: /* Set SIM knobs */
4808 struct ccb_sim_knob *kp = &ccb->knob;
4812 ccb->ccb_h.status = CAM_REQ_INVALID;
4817 bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path));
4818 fcp = FCPARAM(isp, bus);
4820 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
4821 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
4822 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
4823 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
4825 ccb->ccb_h.status = CAM_REQ_CMP;
4826 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
4830 switch (kp->xport_specific.fc.role) {
4831 case KNOB_ROLE_NONE:
4832 if (fcp->role != ISP_ROLE_NONE) {
4834 newrole = ISP_ROLE_NONE;
4837 case KNOB_ROLE_TARGET:
4838 if (fcp->role != ISP_ROLE_TARGET) {
4840 newrole = ISP_ROLE_TARGET;
4843 case KNOB_ROLE_INITIATOR:
4844 if (fcp->role != ISP_ROLE_INITIATOR) {
4846 newrole = ISP_ROLE_INITIATOR;
4849 case KNOB_ROLE_BOTH:
4851 if (fcp->role != ISP_ROLE_BOTH) {
4853 newrole = ISP_ROLE_BOTH;
4857 * We don't really support dual role at present on FC cards.
4859 * We should, but a bunch of things are currently broken,
4860 * so don't allow it.
4862 isp_prt(isp, ISP_LOGERR, "cannot support dual role at present");
4863 ccb->ccb_h.status = CAM_REQ_INVALID;
4868 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole);
4869 if (isp_fc_change_role(isp, bus, newrole) != 0) {
4870 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
4871 #ifdef ISP_TARGET_MODE
4872 } else if (newrole == ISP_ROLE_TARGET || newrole == ISP_ROLE_BOTH) {
4873 ccb->ccb_h.status = isp_enable_deferred_luns(isp, bus);
4881 case XPT_GET_SIM_KNOB: /* Get SIM knobs */
4883 struct ccb_sim_knob *kp = &ccb->knob;
4888 bus = cam_sim_bus(xpt_path_sim(kp->ccb_h.path));
4889 fcp = FCPARAM(isp, bus);
4891 kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
4892 kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
4893 switch (fcp->role) {
4895 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
4897 case ISP_ROLE_TARGET:
4898 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
4900 case ISP_ROLE_INITIATOR:
4901 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
4904 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
4907 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
4908 ccb->ccb_h.status = CAM_REQ_CMP;
4910 ccb->ccb_h.status = CAM_REQ_INVALID;
4915 case XPT_PATH_INQ: /* Path routing inquiry */
4917 struct ccb_pathinq *cpi = &ccb->cpi;
4919 cpi->version_num = 1;
4920 #ifdef ISP_TARGET_MODE
4921 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
4923 cpi->target_sprt = 0;
4925 cpi->hba_eng_cnt = 0;
4926 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
4927 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
4928 cpi->bus_id = cam_sim_bus(sim);
4929 bus = cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
4931 fcparam *fcp = FCPARAM(isp, bus);
4933 cpi->hba_misc = PIM_NOBUSRESET;
4936 * Because our loop ID can shift from time to time,
4937 * make our initiator ID out of range of our bus.
4939 cpi->initiator_id = cpi->max_target + 1;
4942 * Set base transfer capabilities for Fibre Channel, for this HBA.
4945 cpi->base_transfer_speed = 8000000;
4946 } else if (IS_24XX(isp)) {
4947 cpi->base_transfer_speed = 4000000;
4948 } else if (IS_23XX(isp)) {
4949 cpi->base_transfer_speed = 2000000;
4951 cpi->base_transfer_speed = 1000000;
4953 cpi->hba_inquiry = PI_TAG_ABLE;
4954 cpi->transport = XPORT_FC;
4955 cpi->transport_version = 0;
4956 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
4957 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
4958 cpi->xport_specific.fc.port = fcp->isp_portid;
4959 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
4961 sdparam *sdp = SDPARAM(isp, bus);
4962 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
4964 cpi->initiator_id = sdp->isp_initiator_id;
4965 cpi->base_transfer_speed = 3300;
4966 cpi->transport = XPORT_SPI;
4967 cpi->transport_version = 2;
4969 cpi->protocol = PROTO_SCSI;
4970 cpi->protocol_version = SCSI_REV_2;
4971 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
4972 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
4973 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
4974 cpi->unit_number = cam_sim_unit(sim);
4975 cpi->ccb_h.status = CAM_REQ_CMP;
4980 ccb->ccb_h.status = CAM_REQ_INVALID;
4986 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * isp_done -- completion path for an initiator-mode SCSI command.
 * XS_T is the platform command type (FreeBSD: struct ccb_scsiio).
 * Normalizes CAM vs. SCSI status, freezes the device queue on error
 * completions, stops the per-command watchdog, releases the per-command
 * state and hands the ccb back to CAM via xpt_done().
 *
 * NOTE(review): this extract is missing interior source lines (closing
 * braces, else arms), so the comments annotate only visible statements.
 */
4989 isp_done(XS_T *sccb)
4991 	ispsoftc_t *isp = XS_ISP(sccb);
4995 	XS_SETERR(sccb, CAM_REQ_CMP);
/*
 * CAM thinks the request completed, but the SCSI status byte says
 * otherwise: rewrite the CAM status to reflect the SCSI-level error.
 */
4997 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
4998 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
/* CHECK CONDITION without valid autosense data -> autosense failed. */
4999 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
5000 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
5002 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
/* Command is no longer on the SIM queue. */
5006 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5007 	status = sccb->ccb_h.status & CAM_STATUS_MASK;
5008 	if (status != CAM_REQ_CMP) {
/* Selection timeouts are routine (absent devices); don't log them. */
5009 		if (status != CAM_SEL_TIMEOUT)
5010 			isp_prt(isp, ISP_LOGDEBUG0, "target %d lun %d CAM status 0x%x SCSI status 0x%x", XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status, sccb->scsi_status);
/* Freeze the device queue once per failed command so CAM can recover. */
5011 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
5012 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
5013 			xpt_freeze_devq(sccb->ccb_h.path, 1);
/* Optional per-path CAM debug trace of non-successful completions. */
5017 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) && (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5018 		xpt_print(sccb->ccb_h.path, "cam completion status 0x%x\n", sccb->ccb_h.status);
/* Mark done, cancel the watchdog if it was armed, free pcmd, return ccb. */
5021 	XS_CMD_S_DONE(sccb);
5022 	if (XS_TACTIVE_P(sccb))
5023 		callout_stop(&PISP_PCMD(sccb)->wdog);
5024 	XS_CMD_S_CLEAR(sccb);
5025 	isp_free_pcmd(isp, (union ccb *) sccb);
5026 	xpt_done((union ccb *) sccb);
/*
 * isp_async -- platform callback for asynchronous events raised by the
 * core ISP driver.  Variadic: the arguments after 'cmd' depend on the
 * event code and are pulled with va_arg per case.  Translates core events
 * (bus resets, loop state changes, FC port database changes, target-mode
 * notifies) into CAM async notifications and local bookkeeping.
 *
 * NOTE(review): sampled extract -- interior lines (va_start/va_end,
 * braces, break statements) are missing; comments annotate only what is
 * visible.
 */
5030 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
/* Log templates for port arrival/departure messages (WWPN split into two
 * 32-bit halves because isp_prt has no 64-bit format support here). */
5033 	static const char prom[] = "Chan %d PortID 0x%06x handle 0x%x role %s %s WWPN 0x%08x%08x";
5034 	static const char prom2[] = "Chan %d PortID 0x%06x handle 0x%x role %s %s tgt %u WWPN 0x%08x%08x";
5039 	struct cam_path *tmppath;
/*
 * Parallel-SCSI negotiation parameters changed: build a
 * ccb_trans_settings and broadcast AC_TRANSFER_NEG on a temp path.
 */
5043 	case ISPASYNC_NEW_TGT_PARAMS:
5045 		struct ccb_trans_settings_scsi *scsi;
5046 		struct ccb_trans_settings_spi *spi;
5049 		struct ccb_trans_settings cts;
5051 		memset(&cts, 0, sizeof (struct ccb_trans_settings));
5054 		bus = va_arg(ap, int);
5055 		tgt = va_arg(ap, int);
5057 		sdp = SDPARAM(isp, bus);
5059 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(ISP_SPI_PC(isp, bus)->sim), tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
5060 			isp_prt(isp, ISP_LOGWARN, "isp_async cannot make temp path for %d.%d", tgt, bus);
5063 		flags = sdp->isp_devparam[tgt].actv_flags;
5064 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
5065 		cts.protocol = PROTO_SCSI;
5066 		cts.transport = XPORT_SPI;
5068 		scsi = &cts.proto_specific.scsi;
5069 		spi = &cts.xport_specific.spi;
5071 		if (flags & DPARM_TQING) {
5072 			scsi->valid |= CTS_SCSI_VALID_TQ;
5073 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
5076 		if (flags & DPARM_DISC) {
5077 			spi->valid |= CTS_SPI_VALID_DISC;
5078 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
/* NOTE(review): CTS_SPI_VALID_BUS_WIDTH is a validity bit yet is OR'd
 * into spi->flags here; lines 5087-5088 put validity bits in spi->valid.
 * Looks like this should be "spi->valid |=" -- confirm against CAM. */
5080 		spi->flags |= CTS_SPI_VALID_BUS_WIDTH;
5081 		if (flags & DPARM_WIDE) {
5082 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
5084 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
5086 		if (flags & DPARM_SYNC) {
5087 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
5088 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
5089 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
5090 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
5092 		isp_prt(isp, ISP_LOGDEBUG2, "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x", bus, tgt, sdp->isp_devparam[tgt].actv_period, sdp->isp_devparam[tgt].actv_offset, flags);
5093 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
5094 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
5095 		xpt_free_path(tmppath);
/*
 * Bus reset: forward AC_BUS_RESET to CAM on the FC or SPI path for
 * the affected channel (which branch runs depends on card type; the
 * selecting condition is among the missing lines).
 */
5098 	case ISPASYNC_BUS_RESET:
5101 		bus = va_arg(ap, int);
5103 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus);
5105 			xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL);
5107 			xpt_async(AC_BUS_RESET, ISP_SPI_PC(isp, bus)->path, NULL);
/* LIP / loop reset / loop down all funnel into the same handling below,
 * differing only in the logged message. */
5113 		msg = "LIP Received";
5116 	case ISPASYNC_LOOP_RESET:
5121 	case ISPASYNC_LOOP_DOWN:
5127 		bus = va_arg(ap, int);
/* Link is down: clear the active flag and, for a real LOOP_DOWN on a
 * ready channel, freeze the simq and arm the loop-down timer. */
5130 		FCPARAM(isp, bus)->link_active = 0;
5132 		fc = ISP_FC_PC(isp, bus);
5133 		if (cmd == ISPASYNC_LOOP_DOWN && fc->ready) {
5135 			 * We don't do any simq freezing if we are only in target mode
5137 			if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) {
5139 				isp_freeze_loopdown(isp, bus, msg);
5141 			if (!callout_active(&fc->ldt)) {
5142 				callout_reset(&fc->ldt, fc->loop_down_limit * hz, isp_ldt, fc);
5143 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Starting Loop Down Timer @ %lu", (unsigned long) time_uptime);
5147 		isp_prt(isp, ISP_LOGINFO, "Chan %d: %s", bus, msg);
/*
 * Loop up: record it and wait for a Change Notify before rescanning.
 */
5150 	case ISPASYNC_LOOP_UP:
5152 		bus = va_arg(ap, int);
5154 		fc = ISP_FC_PC(isp, bus);
5156 		 * Now we just note that Loop has come up. We don't
5157 		 * actually do anything because we're waiting for a
5158 		 * Change Notify before activating the FC cleanup
5159 		 * thread to look at the state of the loop again.
5161 		FCPARAM(isp, bus)->link_active = 1;
5163 		fc->loop_down_time = 0;
5164 		isp_prt(isp, ISP_LOGINFO, "Chan %d Loop UP", bus);
/*
 * New FC port appeared.  If we are an initiator and it serves the
 * target role, assign it a free virtual target id (skipping the
 * reserved FL_ID..SNS_ID range) and tell CAM it is here.
 */
5166 	case ISPASYNC_DEV_ARRIVED:
5168 		bus = va_arg(ap, int);
5169 		lp = va_arg(ap, fcportdb_t *);
5171 		fc = ISP_FC_PC(isp, bus);
5174 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) && (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
5175 			int dbidx = lp - FCPARAM(isp, bus)->portdb;
5178 			for (i = 0; i < MAX_FC_TARG; i++) {
5179 				if (i >= FL_ID && i <= SNS_ID) {
5182 				if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) {
5186 			if (i < MAX_FC_TARG) {
/* dev_map entries and dev_map_idx are 1-based (0 means unassigned). */
5187 				FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1;
5188 				lp->dev_map_idx = i + 1;
5190 				isp_prt(isp, ISP_LOGWARN, "out of target ids");
5191 				isp_dump_portdb(isp, bus);
5194 		if (lp->dev_map_idx) {
5195 			tgt = lp->dev_map_idx - 1;
5196 			isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "arrived at", tgt, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
5197 			isp_make_here(isp, bus, tgt);
5199 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "arrived", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
/*
 * Existing port changed identity.  Policy switch: if isp_change_is_bad
 * the device is dropped (and CAM told it is gone); otherwise the new
 * portid/roles are adopted in place.
 */
5202 	case ISPASYNC_DEV_CHANGED:
5204 		bus = va_arg(ap, int);
5205 		lp = va_arg(ap, fcportdb_t *);
5207 		fc = ISP_FC_PC(isp, bus);
5210 		if (isp_change_is_bad) {
5211 			lp->state = FC_PORTDB_STATE_NIL;
5212 			if (lp->dev_map_idx) {
5213 				tgt = lp->dev_map_idx - 1;
5214 				FCPARAM(isp, bus)->isp_dev_map[tgt] = 0;
5215 				lp->dev_map_idx = 0;
/* NOTE(review): prom3 is not declared in this view (only prom/prom2 are
 * visible above) -- presumably defined among the elided lines; verify. */
5216 				isp_prt(isp, ISP_LOGCONFIG, prom3, bus, lp->portid, tgt, "change is bad");
5217 				isp_make_gone(isp, bus, tgt);
5219 				isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "changed and departed",
5220 				    (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
5223 			lp->portid = lp->new_portid;
5224 			lp->roles = lp->new_roles;
5225 			if (lp->dev_map_idx) {
5226 				int t = lp->dev_map_idx - 1;
5227 				FCPARAM(isp, bus)->isp_dev_map[t] = (lp - FCPARAM(isp, bus)->portdb) + 1;
5228 				tgt = lp->dev_map_idx - 1;
5229 				isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "changed at", tgt,
5230 				    (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
5232 				isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "changed", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
/* Port re-discovered unchanged: log only. */
5236 	case ISPASYNC_DEV_STAYED:
5238 		bus = va_arg(ap, int);
5239 		lp = va_arg(ap, fcportdb_t *);
5241 		if (lp->dev_map_idx) {
5242 			tgt = lp->dev_map_idx - 1;
5243 			isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "stayed at", tgt,
5244 			    (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
5246 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "stayed",
5247 			    (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
/*
 * Port disappeared.  Mapped devices become zombies and the gone-device
 * timer (isp_gdt) is armed so CAM is told later if the port does not
 * return; unmapped ports are just announced as departed.
 */
5250 	case ISPASYNC_DEV_GONE:
5252 		bus = va_arg(ap, int);
5253 		lp = va_arg(ap, fcportdb_t *);
5255 		fc = ISP_FC_PC(isp, bus);
5257 		 * If this has a virtual target and we haven't marked it
5258 		 * that we're going to have isp_gdt tell the OS it's gone,
5259 		 * set the isp_gdt timer running on it.
5261 		 * If it isn't marked that isp_gdt is going to get rid of it,
5262 		 * announce that it's gone.
5265 		if (lp->dev_map_idx && lp->reserved == 0) {
5267 			lp->state = FC_PORTDB_STATE_ZOMBIE;
5268 			lp->gone_timer = ISP_FC_PC(isp, bus)->gone_device_time;
5269 			if (fc->ready && !callout_active(&fc->gdt)) {
5270 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
5271 				callout_reset(&fc->gdt, hz, isp_gdt, fc);
5273 			tgt = lp->dev_map_idx - 1;
5274 			isp_prt(isp, ISP_LOGCONFIG, prom2, bus, lp->portid, lp->handle, roles[lp->roles], "gone zombie at", tgt, (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
5275 		} else if (lp->reserved == 0) {
5276 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, lp->portid, lp->handle, roles[lp->roles], "departed", (uint32_t) (lp->port_wwn >> 32), (uint32_t) lp->port_wwn);
/*
 * Port/name-server database change notify: cancel a running loop-down
 * timer, log the change, and (as initiator) freeze until rescan.
 * 24XX PDB changes carry extra args (nphdl/nlstate/reason).
 */
5279 	case ISPASYNC_CHANGE_NOTIFY:
5282 		int evt, nphdl, nlstate, reason;
5285 		bus = va_arg(ap, int);
5286 		evt = va_arg(ap, int);
5287 		if (IS_24XX(isp) && evt == ISPASYNC_CHANGE_PDB) {
5288 			nphdl = va_arg(ap, int);
5289 			nlstate = va_arg(ap, int);
5290 			reason = va_arg(ap, int);
5293 			nlstate = reason = 0;
5296 		fc = ISP_FC_PC(isp, bus);
5298 		if (evt == ISPASYNC_CHANGE_PDB) {
5299 			msg = "Chan %d Port Database Changed";
5300 		} else if (evt == ISPASYNC_CHANGE_SNS) {
5301 			msg = "Chan %d Name Server Database Changed";
5303 			msg = "Chan %d Other Change Notify";
5307 		 * If the loop down timer is running, cancel it.
5309 		if (fc->ready && callout_active(&fc->ldt)) {
5310 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Stopping Loop Down Timer @ %lu", (unsigned long) time_uptime);
5311 			callout_stop(&fc->ldt);
5313 		isp_prt(isp, ISP_LOGINFO, msg, bus);
5314 		if (FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) {
5315 			isp_freeze_loopdown(isp, bus, msg);
/* ---- target-mode-only events below ---- */
5320 #ifdef ISP_TARGET_MODE
/*
 * Immediate notify from the core: dispatch on the notify code.
 * Task-management functions go to the TMF handler; arrival/departure
 * notifications are acked; HBA resets drop all WWN entries.
 */
5321 	case ISPASYNC_TARGET_NOTIFY:
5323 		isp_notify_t *notify;
5325 		notify = va_arg(ap, isp_notify_t *);
5327 		switch (notify->nt_ncode) {
5329 		case NT_ABORT_TASK_SET:
5331 		case NT_CLEAR_TASK_SET:
5333 		case NT_TARGET_RESET:
5335 			 * These are task management functions.
5337 			isp_handle_platform_target_tmf(isp, notify);
5344 			 * No action need be taken here.
5348 			isp_del_all_wwn_entries(isp, ISP_NOCHAN);
5352 			 * This is device arrival/departure notification
5354 			isp_handle_platform_target_notify_ack(isp, notify);
/* Two near-identical arms (presumably arrival vs. departure -- the case
 * labels are among the elided lines): publish an AC_CONTRACT device-
 * change event to CAM with WWPN, port id and N-port handle. */
5358 			struct ac_contract ac;
5359 			struct ac_device_changed *fc;
5361 			ac.contract_number = AC_CONTRACT_DEV_CHG;
5362 			fc = (struct ac_device_changed *) ac.contract_data;
5363 			fc->wwpn = notify->nt_wwn;
5364 			fc->port = notify->nt_sid;
5365 			fc->target = notify->nt_nphdl;
5367 			xpt_async(AC_CONTRACT, ISP_FC_PC(isp, notify->nt_channel)->path, &ac);
5372 			struct ac_contract ac;
5373 			struct ac_device_changed *fc;
5375 			ac.contract_number = AC_CONTRACT_DEV_CHG;
5376 			fc = (struct ac_device_changed *) ac.contract_data;
5377 			fc->wwpn = notify->nt_wwn;
5378 			fc->port = notify->nt_sid;
5379 			fc->target = notify->nt_nphdl;
5381 			xpt_async(AC_CONTRACT, ISP_FC_PC(isp, notify->nt_channel)->path, &ac);
5385 			isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
5386 			isp_handle_platform_target_notify_ack(isp, notify);
/*
 * Raw target-mode response-queue entry: dispatch on entry type to the
 * matching platform handler (notify/ATIO/CTIO/ABTS/lun enable).
 */
5391 	case ISPASYNC_TARGET_ACTION:
5396 		hp = va_arg(ap, isphdr_t *);
5398 		switch (hp->rqs_entry_type) {
5400 			isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x", __func__, hp->rqs_entry_type);
5402 		case RQSTYPE_NOTIFY:
5404 				isp_handle_platform_notify_scsi(isp, (in_entry_t *) hp);
5405 			} else if (IS_24XX(isp)) {
5406 				isp_handle_platform_notify_24xx(isp, (in_fcentry_24xx_t *) hp);
5408 				isp_handle_platform_notify_fc(isp, (in_fcentry_t *) hp);
5413 				isp_handle_platform_atio7(isp, (at7_entry_t *) hp);
5415 				isp_handle_platform_atio(isp, (at_entry_t *) hp);
5419 			isp_handle_platform_atio2(isp, (at2_entry_t *) hp);
5425 			isp_handle_platform_ctio(isp, hp);
/*
 * 24XX ABTS received: synthesize an NT_ABORT_TASK notify identifying
 * channel, initiator WWN and lun as precisely as possible, then hand
 * it to the TMF handler.
 */
5427 		case RQSTYPE_ABTS_RCVD:
5429 			abts_t *abts = (abts_t *)hp;
/* NOTE(review): "¬ify" below is mojibake -- almost certainly the HTML
 * entity corruption of "&notify" (nt points at the local). Restore the
 * original byte sequence when fixing encoding. */
5430 			isp_notify_t notify, *nt = ¬ify;
5436 			did = (abts->abts_did_hi << 16) | abts->abts_did_lo;
5437 			sid = (abts->abts_sid_hi << 16) | abts->abts_sid_lo;
5438 			ISP_MEMZERO(nt, sizeof (isp_notify_t));
5442 			nt->nt_nphdl = abts->abts_nphdl;
5444 			isp_find_chan_by_did(isp, did, &chan);
5445 			if (chan == ISP_NOCHAN) {
5446 				nt->nt_tgt = TGT_ANY;
5448 				nt->nt_tgt = FCPARAM(isp, chan)->isp_wwpn;
5449 				if (isp_find_pdb_by_loopid(isp, chan, abts->abts_nphdl, &lp)) {
5450 					nt->nt_wwn = lp->port_wwn;
5452 					nt->nt_wwn = INI_ANY;
5456 			 * Try hard to find the lun for this command.
5458 			tptr = get_lun_statep_from_tag(isp, chan, abts->abts_rxid_task);
5460 				nt->nt_lun = xpt_path_lun_id(tptr->owner);
5461 				rls_lun_statep(isp, tptr);
5463 				nt->nt_lun = LUN_ANY;
5465 			nt->nt_need_ack = 1;
/* Pack both exchange ids into one 64-bit tag: task id low, ABTS id high. */
5466 			nt->nt_tagval = abts->abts_rxid_task;
5467 			nt->nt_tagval |= (((uint64_t) abts->abts_rxid_abts) << 32);
5468 			if (abts->abts_rxid_task == ISP24XX_NO_TASK) {
5469 				isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x has no task id (rx_id 0x%04x ox_id 0x%04x)",
5470 				    abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rx_id, abts->abts_ox_id);
5472 				isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS from N-Port handle 0x%x Port 0x%06x for task 0x%x (rx_id 0x%04x ox_id 0x%04x)",
5473 				    abts->abts_rxid_abts, abts->abts_nphdl, sid, abts->abts_rxid_task, abts->abts_rx_id, abts->abts_ox_id);
5475 			nt->nt_channel = chan;
5476 			nt->nt_ncode = NT_ABORT_TASK;
5477 			nt->nt_lreserved = hp;
5478 			isp_handle_platform_target_tmf(isp, nt);
5481 		case RQSTYPE_ENABLE_LUN:
5482 		case RQSTYPE_MODIFY_LUN:
5483 			isp_ledone(isp, (lun_entry_t *) hp);
/*
 * Firmware crash: read the RISC address (and bus on dual-bus cards),
 * then reinit with mailbox sleeping temporarily disabled and announce
 * the restart.
 */
5489 	case ISPASYNC_FW_CRASH:
5491 		uint16_t mbox1, mbox6;
5492 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
5493 		if (IS_DUALBUS(isp)) {
5494 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
5498 		isp_prt(isp, ISP_LOGERR, "Internal Firmware Error on bus %d @ RISC Address 0x%x", mbox6, mbox1);
/* mbox1 is reused here to stash the saved mbox_sleep_ok flag. */
5499 		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
5500 		isp->isp_osinfo.mbox_sleep_ok = 0;
5502 		isp->isp_osinfo.mbox_sleep_ok = mbox1;
5503 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
5507 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
5514 * Locks are held before coming here.
/*
 * isp_uninit -- quiesce the adapter on detach/shutdown: reset the RISC
 * (2400-series vs. legacy HCCR register -- the selecting condition is
 * among the elided lines) and mask interrupts.  Per the comment above,
 * the caller already holds the required locks.
 */
5517 isp_uninit(ispsoftc_t *isp)
5520 		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET);
5522 		ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
5524 	ISP_DISABLE_INTS(isp);
5528 * When we want to get the 'default' WWNs (when lacking NVRAM), we pick them
5529 * up from our platform default (defww{p|n}n) and morph them based upon
5532 * When we want to get the 'active' WWNs, we get NVRAM WWNs and then morph them
5533 * based upon channel.
/*
 * isp_default_wwn -- produce a World Wide Node/Port Name for a channel.
 *
 * isactive selects between the running ("active") name and the default;
 * iswwnn selects node (1) vs. port (0) name.  Precedence visible here:
 * per-channel overrides (def_wwnn/def_wwpn), then NVRAM values, with a
 * hard-coded fallback seed 0x400000007F000009 when nothing is set.
 * For channels other than 0 with a type-2 NAA seed, a channel number is
 * folded into bits 52..59 so each channel gets a distinct name (see the
 * in-body comment block).
 *
 * NOTE(review): sampled extract -- branch conditions between these lines
 * are elided; the precedence description above reflects only the visible
 * assignments.
 */
5537 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
5540 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
5543 	 * If we're asking for a active WWN, the default overrides get
5544 	 * returned, otherwise the NVRAM value is picked.
5546 	 * If we're asking for a default WWN, we just pick the default override.
5549 		seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
5553 			seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram : FCPARAM(isp, chan)->isp_wwpn_nvram;
/* Last-resort fixed seed when neither override nor NVRAM provides one. */
5557 				return (0x400000007F000009ull);
5559 		seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
5564 	 * For channel zero just return what we have. For either ACTIVE or
5565 	 * DEFAULT cases, we depend on default override of NVRAM values for
5573 	 * For other channels, we are doing one of three things:
5575 	 * 1. If what we have now is non-zero, return it. Otherwise we morph
5576 	 * values from channel 0. 2. If we're here for a WWPN we synthesize
5577 	 * it if Channel 0's wwpn has a type 2 NAA. 3. If we're here for a
5578 	 * WWNN we synthesize it if Channel 0's wwnn has a type 2 NAA.
5585 		seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram : FCPARAM(isp, 0)->isp_wwpn_nvram;
5587 			seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn : ISP_FC_PC(isp, 0)->def_wwpn;
/* Type-2 NAA (top nibble == 2): safe to embed a channel discriminator. */
5590 	if (((seed >> 60) & 0xf) == 2) {
5592 		 * The type 2 NAA fields for QLogic cards appear be laid out
5595 		 * bits 63..60 NAA == 2 bits 59..57 unused/zero bit 56
5596 		 * port (1) or node (0) WWN distinguishor bit 48
5597 		 * physical port on dual-port chips (23XX/24XX)
5599 		 * This is somewhat nutty, particularly since bit 48 is
5600 		 * irrelevant as they assign separate serial numbers to
5601 		 * different physical ports anyway.
5603 		 * We'll stick our channel number plus one first into bits
5604 		 * 57..59 and thence into bits 52..55 which allows for 8 bits
5605 		 * of channel which is comfortably more than our maximum
/* Clear bits 52..59, then insert (chan+1): low nibble at bits 56..59,
 * high nibble at bits 52..55. */
5608 		seed &= ~0x0FF0000000000000ULL;
5610 		seed |= ((uint64_t) (chan + 1) & 0xf) << 56;
5611 		seed |= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
/*
 * isp_prt -- driver printf.  Skips output unless 'level' is ISP_LOGALL or
 * intersects the instance debug mask (isp_dblev).  Prefixes the message
 * with the device name and appends a newline.
 *
 * NOTE(review): the "%s: " prefix is written with plain sprintf into
 * lbuf (size not visible here); device_get_nameunit output is bounded in
 * practice, but snprintf would be the defensive choice -- confirm lbuf's
 * declared size in the elided lines.
 */
5620 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
5626 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
5629 	sprintf(lbuf, "%s: ", device_get_nameunit(isp->isp_dev));
5632 	vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
5634 	printf("%s\n", lbuf);
/*
 * isp_xs_prt -- like isp_prt but prefixes the message with the CAM path
 * of the command 'xs' (bus/target/lun), subject to the same debug-level
 * gating.  The vprintf of fmt is among the elided lines.
 */
5638 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
5641 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
5644 	xpt_print_path(xs->ccb_h.path);
/*
 * isp_nanotime_sub -- difference of two timespecs as nanoseconds.
 * Copies *b, (presumably) subtracts *a in the elided lines, and converts
 * with GET_NANOSEC.  Return statement is also among the elided lines.
 */
5652 isp_nanotime_sub(struct timespec *b, struct timespec *a)
5655 	struct timespec x = *b;
5657 	elapsed = GET_NANOSEC(&x);
/*
 * isp_mbox_acquire -- claim the single mailbox-command slot.  Fails
 * (early-return branch elided) if a mailbox command is already busy;
 * otherwise clears the done flag and marks the mailbox busy.
 * NOTE(review): mboxbsy is a plain flag -- callers presumably serialize
 * via the ISP lock; confirm against the elided locking.
 */
5664 isp_mbox_acquire(ispsoftc_t *isp)
5666 	if (isp->isp_osinfo.mboxbsy) {
5669 		isp->isp_osinfo.mboxcmd_done = 0;
5670 		isp->isp_osinfo.mboxbsy = 1;
/*
 * isp_mbox_wait_complete -- wait for a mailbox command to finish, either
 * by sleeping (msleep on isp_mbxworkp, interrupt-driven completion) or by
 * polling ISP_READ_ISR/isp_intr in 100us steps, up to mbp->timeout
 * microseconds (MBCMD_DEFAULT_TIMEOUT if zero).  The outer loop runs
 * isp_mbxwrk0+1 times to cover multi-step mailbox work.  On timeout it
 * logs the command, its origin (mbp->func/lineno) and forces
 * param[0] = MBOX_TIMEOUT.
 */
5676 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp)
5678 	unsigned int usecs = mbp->timeout;
5679 	unsigned int max, olim, ilim;
5682 		usecs = MBCMD_DEFAULT_TIMEOUT;
5684 	max = isp->isp_mbxwrk0 + 1;
5686 	if (isp->isp_osinfo.mbox_sleep_ok) {
5687 		unsigned int ms = (usecs + 999) / 1000;
/* Sleep path: temporarily clear sleep_ok so nested use polls instead. */
5689 		isp->isp_osinfo.mbox_sleep_ok = 0;
5690 		isp->isp_osinfo.mbox_sleeping = 1;
5691 		for (olim = 0; olim < max; olim++) {
5692 			msleep(&isp->isp_mbxworkp, &isp->isp_osinfo.lock, PRIBIO, "ispmbx_sleep", isp_mstohz(ms));
5693 			if (isp->isp_osinfo.mboxcmd_done) {
5697 		isp->isp_osinfo.mbox_sleep_ok = 1;
5698 		isp->isp_osinfo.mbox_sleeping = 0;
/* Poll path: service the interrupt registers by hand until done. */
5700 		for (olim = 0; olim < max; olim++) {
5701 			for (ilim = 0; ilim < usecs; ilim += 100) {
5703 				uint16_t sema, mbox;
5704 				if (isp->isp_osinfo.mboxcmd_done) {
5707 				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
5708 					isp_intr(isp, isr, sema, mbox);
5709 					if (isp->isp_osinfo.mboxcmd_done) {
5715 			if (isp->isp_osinfo.mboxcmd_done) {
5720 	if (isp->isp_osinfo.mboxcmd_done == 0) {
5721 		isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (started @ %s:%d)",
5722 		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled", isp->isp_lastmbxcmd, usecs, mbp->func, mbp->lineno);
5723 		mbp->param[0] = MBOX_TIMEOUT;
5724 		isp->isp_osinfo.mboxcmd_done = 1;
/*
 * isp_mbox_notify_done -- completion side of the mailbox handshake:
 * wake a sleeper in isp_mbox_wait_complete (if any) and set the done
 * flag that both the sleep and poll paths test.
 */
5729 isp_mbox_notify_done(ispsoftc_t *isp)
5731 	if (isp->isp_osinfo.mbox_sleeping) {
5732 		wakeup(&isp->isp_mbxworkp);
5734 	isp->isp_osinfo.mboxcmd_done = 1;
/* isp_mbox_release -- release the mailbox slot taken by isp_mbox_acquire. */
5738 isp_mbox_release(ispsoftc_t *isp)
5740 	isp->isp_osinfo.mboxbsy = 0;
/*
 * isp_fc_scratch_acquire -- claim the per-channel FC scratch area.
 * Fails (return branch elided) if already busy, else marks it busy.
 */
5744 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
5747 	if (isp->isp_osinfo.pc.fc[chan].fcbsy) {
5750 	isp->isp_osinfo.pc.fc[chan].fcbsy = 1;
/*
 * NOTE(review): fragment -- the enclosing function header is elided.
 * Splits a millisecond count into a struct timeval (seconds + micro-
 * seconds); presumably the body of a ms-to-hz conversion helper
 * (isp_mstohz, used by isp_mbox_wait_complete above) -- confirm.
 */
5760 	t.tv_sec = ms / 1000;
5761 	t.tv_usec = (ms % 1000) * 1000;
/*
 * isp_platform_intr -- bus interrupt handler.  Reads the interrupt
 * status registers; if nothing is pending the interrupt is counted as
 * bogus (shared-IRQ noise), otherwise it is dispatched to the core
 * isp_intr().  Lock acquire/release around this is among the elided
 * lines.
 */
5773 isp_platform_intr(void *arg)
5775 	ispsoftc_t *isp = arg;
5777 	uint16_t sema, mbox;
5781 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
5782 		isp->isp_intbogus++;
5784 		isp_intr(isp, isr, sema, mbox);
/*
 * isp_common_dmateardown -- post-I/O DMA cleanup for a command: sync the
 * map in the direction the data moved (POSTREAD for device-to-host,
 * POSTWRITE otherwise), then unload it.  'hdl' is unused in the visible
 * lines.
 */
5790 isp_common_dmateardown(ispsoftc_t *isp, struct ccb_scsiio *csio, uint32_t hdl)
5792 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
5793 		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTREAD);
5795 		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_POSTWRITE);
5797 	bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
5801 isp_timer(void *arg)
5803 ispsoftc_t *isp = arg;
5804 #ifdef ISP_TARGET_MODE
5805 isp_tmcmd_restart(isp);
5807 callout_reset(&isp->isp_osinfo.tmo, hz, isp_timer, isp);