2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5 * Copyright (c) 1997-2009 by Matthew Jacob
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <dev/isp/isp_freebsd.h>
37 #include <sys/unistd.h>
38 #include <sys/kthread.h>
40 #include <sys/module.h>
41 #include <sys/ioccom.h>
42 #include <dev/isp/isp_ioctl.h>
43 #include <sys/devicestat.h>
44 #include <cam/cam_periph.h>
45 #include <cam/cam_xpt_periph.h>
/* Module glue: version and CAM dependency. */
MODULE_VERSION(isp, 1);
MODULE_DEPEND(isp, cam, 1, 1, 1);

/* Driver-wide state and tunable defaults shared by all isp instances. */
int isp_announced = 0;
int isp_loop_down_limit = 60;	/* default loop down limit */
int isp_quickboot_time = 7;	/* don't wait more than N secs for loop up */
int isp_gone_device_time = 30;	/* grace time before reporting device lost */
/* Log format used when an FC port departs the port database. */
static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
/* Forward declarations of file-local support routines. */
static void isp_freeze_loopdown(ispsoftc_t *, int);
static void isp_loop_changed(ispsoftc_t *isp, int chan);
static void isp_rq_check_above(ispsoftc_t *);
static void isp_rq_check_below(ispsoftc_t *);
static d_ioctl_t ispioctl;
static void isp_poll(struct cam_sim *);
static callout_func_t isp_watchdog;
static callout_func_t isp_gdt;
static task_fn_t isp_gdt_task;
static void isp_kthread(void *);
static void isp_action(struct cam_sim *, union ccb *);
/* Ticks between isp_timer() reschedules; initialized in isp_attach(). */
static int isp_timer_count;
static void isp_timer(void *);
/*
 * Character device switch for the /dev/isp* control node.
 * NOTE(review): the remaining initializers (.d_ioctl, .d_name, ...) and
 * the closing brace are elided in this view of the file.
 */
static struct cdevsw isp_cdevsw = {
	.d_version =	D_VERSION,
/*
 * Sysctl handler for the per-channel "role" node.  Lets the operator
 * change the initiator role of an FC channel; the target-role bit is
 * preserved as-is since target mode cannot be toggled from here.
 * NOTE(review): several lines (return type, early-return bodies, end of
 * function) are elided in this view.
 */
isp_role_sysctl(SYSCTL_HANDLER_ARGS)
	ispsoftc_t *isp = (ispsoftc_t *)arg1;
	int error, old, value;

	value = FCPARAM(isp, chan)->role;

	/* Publish the current value; stop on error or a read-only request. */
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))

	/* Reject values outside the legal role range. */
	if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)

	old = FCPARAM(isp, chan)->role;

	/* We don't allow target mode switch from here. */
	value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);

	/* If nothing has changed -- we are done. */

	/* Actually change the role. */
	error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
/*
 * Attach one channel of the controller to CAM: allocate and register a
 * SIM, create a wildcard path, initialize the per-channel FC state and
 * target-mode pools, spawn the per-channel worker thread, and publish
 * the channel's sysctl nodes.  Error paths unwind whatever CAM
 * registration was already done.
 * NOTE(review): many lines (returns, braces, some sysctl descriptions)
 * are elided in this view.
 */
isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
	struct cam_path *path;
#ifdef ISP_TARGET_MODE
	/* Allocate a SIM sized to the controller's command limit. */
	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
	    device_get_unit(isp->isp_dev), &isp->isp_lock,
	    isp->isp_maxcmds, isp->isp_maxcmds, devq);
	if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
		cam_sim_free(sim, FALSE);
	if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, FALSE);
	fcparam *fcp = FCPARAM(isp, chan);
	struct isp_fc *fc = ISP_FC_PC(isp, chan);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
	/* Default to using both GFT_ID and GFF_ID during fabric scans. */
	fcp->isp_use_gft_id = 1;
	fcp->isp_use_gff_id = 1;
	callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
	TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
#ifdef ISP_TARGET_MODE
	/* Seed the target-mode free pools (INOT and ATIO private data). */
	TAILQ_INIT(&fc->waitq);
	STAILQ_INIT(&fc->ntfree);
	for (i = 0; i < ATPDPSIZE; i++)
		STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
	LIST_INIT(&fc->atfree);
	for (i = ATPDPSIZE-1; i >= 0; i--)
		LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
	for (i = 0; i < ATPDPHASHSIZE; i++)
		LIST_INIT(&fc->atused[i]);
	isp_loop_changed(isp, chan);
	/* Spawn the per-channel worker thread; unwind CAM state on failure. */
	if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
	    "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
		xpt_free_path(fc->path);
		xpt_bus_deregister(cam_sim_path(fc->sim));
		cam_sim_free(fc->sim, FALSE);
	fc->num_threads += 1;
	/* Per-channel sysctl subtree: "chan%d" under the device's tree. */
	snprintf(name, sizeof(name), "chan%d", chan);
	tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
	    "World Wide Node Name");
	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
	    "World Wide Port Name");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
#if defined(ISP_TARGET_MODE) && defined(DEBUG)
	/* Debug-only fault injection knob. */
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
	    "Cause a Lost Frame on a Read");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    isp, chan, isp_role_sysctl, "I", "Current role");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
	    "Connection speed in gigabits");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
	    "Connection topology");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
	    "Use GFT_ID during fabric scan");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
	    "Use GFF_ID during fabric scan");
/*
 * Detach one channel: pull its SIM and path out of CAM, then wait for
 * the channel's spawned worker threads to exit before returning.
 * NOTE(review): some lines are elided in this view.
 */
isp_detach_chan(ispsoftc_t *isp, int chan)
	struct cam_path *path;

	ISP_GET_PC(isp, chan, sim, sim);
	ISP_GET_PC(isp, chan, path, path);
	ISP_GET_PC_ADDR(isp, chan, num_threads, num_threads);
	xpt_bus_deregister(cam_sim_path(sim));
	cam_sim_free(sim, FALSE);

	/* Wait for the channel's spawned threads to exit. */
	wakeup(isp->isp_osinfo.pc.ptr);
	while (*num_threads != 0)
		mtx_sleep(isp, &isp->isp_lock, PRIBIO, "isp_reap", 100);
/*
 * Top-level attach: allocate the shared SIM queue, attach every
 * channel, arm the periodic service timer, and create the /dev control
 * node.  On a per-channel failure, previously attached channels are
 * unwound.  NOTE(review): returns and braces are partly elided here.
 */
isp_attach(ispsoftc_t *isp)
	const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
	int du = device_get_unit(isp->isp_dev);

	/*
	 * Create the device queue for our SIM(s).
	 */
	isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
	if (isp->isp_osinfo.devq == NULL) {
	for (chan = 0; chan < isp->isp_nchan; chan++) {
		if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
	/* Arm the periodic service timer at 4 Hz (hz >> 2 ticks). */
	callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
	isp_timer_count = hz >> 2;
	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
	isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
	if (isp->isp_osinfo.cdev) {
		isp->isp_osinfo.cdev->si_drv1 = isp;
	/* Failure path: tear down channels already attached. */
	while (--chan >= 0) {
		struct cam_path *path;

		ISP_GET_PC(isp, chan, sim, sim);
		ISP_GET_PC(isp, chan, path, path);
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, FALSE);
	cam_simq_free(isp->isp_osinfo.devq);
	isp->isp_osinfo.devq = NULL;
/*
 * Full driver detach: destroy the control device node, signal the
 * per-channel threads to exit, detach channels in reverse order, then
 * drain the timer and free the shared SIM queue.
 */
isp_detach(ispsoftc_t *isp)
	if (isp->isp_osinfo.cdev) {
		destroy_dev(isp->isp_osinfo.cdev);
		isp->isp_osinfo.cdev = NULL;

	/* Tell spawned threads that we're exiting. */
	isp->isp_osinfo.is_exiting = 1;
	for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
		isp_detach_chan(isp, chan);
	callout_drain(&isp->isp_osinfo.tmo);
	cam_simq_free(isp->isp_osinfo.devq);
/*
 * Freeze a channel's SIM queue because its FC loop went down.  If the
 * queue is already frozen for another reason, just record the loopdown
 * reason bit instead of freezing again.
 */
isp_freeze_loopdown(ispsoftc_t *isp, int chan)
	struct isp_fc *fc = ISP_FC_PC(isp, chan);

	if (fc->simqfrozen == 0) {
		isp_prt(isp, ISP_LOGDEBUG0,
		    "Chan %d Freeze simq (loopdown)", chan);
		fc->simqfrozen = SIMQFRZ_LOOPDOWN;
		xpt_freeze_simq(fc->sim, 1);
	/* NOTE(review): else-branch header elided in this view. */
		isp_prt(isp, ISP_LOGDEBUG0,
		    "Chan %d Mark simq frozen (loopdown)", chan);
		fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
/*
 * Clear the loopdown freeze reason for a channel; only release the SIM
 * queue when no other freeze reasons remain set.
 */
isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
	struct isp_fc *fc = ISP_FC_PC(isp, chan);

	int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
	fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
	if (wasfrozen && fc->simqfrozen == 0) {
		isp_prt(isp, ISP_LOGDEBUG0,
		    "Chan %d Release simq", chan);
		xpt_release_simq(fc->sim, 1);
/*
 * Functions to protect from request queue overflow by freezing SIM queue.
 * XXX: freezing only one arbitrary SIM, since they all share the queue.
 */
isp_rq_check_above(ispsoftc_t *isp)
	struct isp_fc *fc = ISP_FC_PC(isp, 0);

	/* Already flagged as overflowed, or no SIM yet -- nothing to do. */
	if (isp->isp_rqovf || fc->sim == NULL)
	/* Fewer than QENTRY_MAX entries free: freeze until space appears. */
	if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
		xpt_freeze_simq(fc->sim, 1);
/*
 * Counterpart of isp_rq_check_above(): release the SIM queue once
 * enough request queue entries are available again.
 */
isp_rq_check_below(ispsoftc_t *isp)
	struct isp_fc *fc = ISP_FC_PC(isp, 0);

	if (!isp->isp_rqovf || fc->sim == NULL)
	if (isp_rqentry_avail(isp, QENTRY_MAX)) {
		xpt_release_simq(fc->sim, 0);
/*
 * ioctl handler for the ispN control device node.  Dispatches on the
 * command code; retval defaults to ENOTTY for unknown commands.
 * NOTE(review): the switch framing and several case labels/returns are
 * elided in this view; surviving statements are grouped below by the
 * ioctl they belong to.
 */
ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
	int nr, chan, retval = ENOTTY;

	/* ISP_SDBLEV (debug level): swap the new level in, return the old. */
	int olddblev = isp->isp_dblev;
	isp->isp_dblev = *(int *)addr;
	*(int *)addr = olddblev;

	/* Report the current role of a channel after range-checking it. */
	if (chan < 0 || chan >= isp->isp_nchan) {
	*(int *)addr = FCPARAM(isp, chan)->role;

	/* Set-role: validate channel and role bits, then apply. */
	if (chan < 0 || chan >= isp->isp_nchan) {
	if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
	*(int *)addr = FCPARAM(isp, chan)->role;
	retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);

	/* Rescan: run the FC state machine with a 5-second limit. */
	chan = *(intptr_t *)addr;
	if (chan < 0 || chan >= isp->isp_nchan) {
	if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {

	/* Send a LIP on the requested channel. */
	chan = *(intptr_t *)addr;
	if (chan < 0 || chan >= isp->isp_nchan) {
	if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {

	case ISP_FC_GETDINFO:
		/* Copy out the port database entry for a given loop id. */
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;

		if (ifc->loopid >= MAX_FC_TARG) {
		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
		if (lp->state != FC_PORTDB_STATE_NIL) {
			ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
			ifc->loopid = lp->handle;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
	case ISP_FC_GETHINFO:
		/* Copy out adapter/channel summary information. */
		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
		int chan = hba->fc_channel;

		if (chan < 0 || chan >= isp->isp_nchan) {
		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
		hba->fc_nchannels = isp->isp_nchan;
		hba->fc_nports = MAX_FC_TARG;
		hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
		hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
		hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
		hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
		hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
		hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
		hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;

	/* Task management: build and send a 24xx TMF IOCB synchronously. */
		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
		uint8_t resp[QENTRY_LEN];
		isp24xx_statusreq_t sp;

		if (chan < 0 || chan >= isp->isp_nchan) {
		needmarker = retval = 0;
		fcp = FCPARAM(isp, chan);

		/* Locate the port database entry matching the N-port handle. */
		for (i = 0; i < MAX_FC_TARG; i++) {
			lp = &fcp->portdb[i];
			if (lp->handle == nphdl) {
		if (i == MAX_FC_TARG) {
		ISP_MEMZERO(&tmf, sizeof(tmf));
		tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
		tmf.tmf_header.rqs_entry_count = 1;
		tmf.tmf_nphdl = lp->handle;
		tmf.tmf_tidlo = lp->portid;
		tmf.tmf_tidhi = lp->portid >> 16;
		tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
		/* Encode the LUN; bit 0x40 marks flat addressing for LUN >= 256. */
		tmf.tmf_lun[1] = fct->lun & 0xff;
		if (fct->lun >= 256) {
			tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
		/* Translate the requested action into a 24xx TMF flag. */
		switch (fct->action) {
			tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
		case IPT_TARGET_RESET:
			tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
			tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
		case IPT_CLEAR_TASK_SET:
			tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
		case IPT_ABORT_TASK_SET:
			tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;

		/* Prepare space for response in memory */
		memset(resp, 0xff, sizeof(resp));
		tmf.tmf_handle = isp_allocate_handle(isp, resp,
		if (tmf.tmf_handle == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "%s: TMF of Chan %d out of handles",

		/* Send request and wait for response. */
		reqp = isp_getrqentry(isp);
			isp_prt(isp, ISP_LOGERR,
			    "%s: TMF of Chan %d out of rqent",
			isp_destroy_handle(isp, tmf.tmf_handle);
		isp_put_24xx_tmf(isp, &tmf, (isp24xx_tmf_t *)reqp);
		if (isp->isp_dblev & ISP_LOGDEBUG1)
			isp_print_bytes(isp, "IOCB TMF", QENTRY_LEN, reqp);
		ISP_SYNC_REQUEST(isp);
		/* Sleep until the response IOCB wakes us or 5 seconds elapse. */
		if (msleep(resp, &isp->isp_lock, 0, "TMF", 5*hz) == EWOULDBLOCK) {
			isp_prt(isp, ISP_LOGERR,
			    "%s: TMF of Chan %d timed out",
			isp_destroy_handle(isp, tmf.tmf_handle);
		if (isp->isp_dblev & ISP_LOGDEBUG1)
			isp_print_bytes(isp, "IOCB TMF response", QENTRY_LEN, resp);
		isp_get_24xx_response(isp, (isp24xx_statusreq_t *)resp, &sp);
		if (sp.req_completion_status != 0)
/* Per-command (pcmd) free-list helpers. */
static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
/*
 * Pop a per-command struct off the softc's free list and attach it to
 * the CCB.  NOTE(review): the failure/success return statements are
 * elided in this view.
 */
static ISP_INLINE int
isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
	ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
	if (ISP_PCMD(ccb) == NULL) {
	isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
/*
 * Return a CCB's per-command struct to the softc free list and clear
 * the CCB's pointer to it.
 */
static ISP_INLINE void
isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
#ifdef ISP_TARGET_MODE
	PISP_PCMD(ccb)->datalen = 0;
	PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
	isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
	ISP_PCMD(ccb) = NULL;
/*
 * Put the target mode functions here, because some are inlines
 */
#ifdef ISP_TARGET_MODE
static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
static cam_status create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **);
static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
static void isp_enable_lun(ispsoftc_t *, union ccb *);
static void isp_disable_lun(ispsoftc_t *, union ccb *);
static callout_func_t isp_refire_notify_ack;
static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
/* Identifies who is asking for a CTIO to start (affects queueing order). */
enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
/*
 * Look up the target-mode LUN state for (bus, lun) in the per-channel
 * LUN hash.  NOTE(review): the tail of the function (match return /
 * NULL fallthrough) is elided in this view.
 */
static ISP_INLINE tstate_t *
get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
	tstate_t *tptr = NULL;

	if (bus < isp->isp_nchan) {
		ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp);
		SLIST_FOREACH(tptr, lhp, next) {
			if (tptr->ts_lun == lun)
/*
 * Re-deliver ATIOs previously deferred for lack of resources.  The
 * restart queue is drained onto a local list first, so entries that get
 * deferred again while draining are noticed and spliced back preserving
 * order.  Returns nonzero if the restart queue is still non-empty.
 */
isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
	inot_private_data_t *ntp;

	if (STAILQ_EMPTY(&tptr->restart_queue))
	STAILQ_CONCAT(&rq, &tptr->restart_queue);
	while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
		STAILQ_REMOVE_HEAD(&rq, next);
		isp_prt(isp, ISP_LOGTDEBUG0,
		    "%s: restarting resrc deprived %x", __func__,
		    ((at7_entry_t *)ntp->data)->at_rxid);
		isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
		isp_put_ntpd(isp, bus, ntp);
		if (!STAILQ_EMPTY(&tptr->restart_queue))
	/* Splice anything deferred again back onto the restart queue in order. */
	if (!STAILQ_EMPTY(&rq)) {
		STAILQ_CONCAT(&rq, &tptr->restart_queue);
		STAILQ_CONCAT(&tptr->restart_queue, &rq);
	return (!STAILQ_EMPTY(&tptr->restart_queue));
/*
 * Periodic retry of deferred target-mode work: restart queued ATIOs on
 * every LUN of every channel, kick CTIOs waiting on the per-channel
 * wait queue, then re-evaluate the request-queue freeze state.
 */
isp_tmcmd_restart(ispsoftc_t *isp)
	struct isp_ccbq *waitq;

	for (bus = 0; bus < isp->isp_nchan; bus++) {
		for (i = 0; i < LUN_HASH_SIZE; i++) {
			ISP_GET_PC_ADDR(isp, bus, lun_hash[i], lhp);
			SLIST_FOREACH(tptr, lhp, next)
				isp_atio_restart(isp, bus, tptr);

		/*
		 * We only need to do this once per channel.
		 */
		ISP_GET_PC_ADDR(isp, bus, waitq, waitq);
		ccb = (union ccb *)TAILQ_FIRST(waitq);
			TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
			isp_target_start_ctio(isp, ccb, FROM_TIMER);
	isp_rq_check_above(isp);
	isp_rq_check_below(isp);
/*
 * Allocate an ATIO private-data struct for the given tag: take one off
 * the channel's free list and insert it into the in-use hash bucket
 * keyed by the tag.  NOTE(review): NULL-check branch and return elided.
 */
static atio_private_data_t *
isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
	struct atpdlist *atfree;
	struct atpdlist *atused;
	atio_private_data_t *atp;

	ISP_GET_PC_ADDR(isp, chan, atfree, atfree);
	atp = LIST_FIRST(atfree);
		LIST_REMOVE(atp, next);
		ISP_GET_PC(isp, chan, atused, atused);
		LIST_INSERT_HEAD(&atused[ATPDPHASH(tag)], atp, next);
/*
 * Find the in-use ATIO private data matching a tag by searching the
 * tag's hash bucket.  NOTE(review): match test and returns elided.
 */
static atio_private_data_t *
isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
	struct atpdlist *atused;
	atio_private_data_t *atp;

	ISP_GET_PC(isp, chan, atused, atused);
	LIST_FOREACH(atp, &atused[ATPDPHASH(tag)], next) {
/*
 * Release an ATIO private-data struct: return any extended-command
 * buffer, unhash the entry, scrub it, and put it on the free list.
 */
isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
	struct atpdlist *atfree;

	isp_put_ecmd(isp, atp->ests);
	LIST_REMOVE(atp, next);
	memset(atp, 0, sizeof (*atp));
	ISP_GET_PC_ADDR(isp, chan, atfree, atfree);
	LIST_INSERT_HEAD(atfree, atp, next);
/*
 * Debug aid: log every non-free ATIO private-data entry of a channel,
 * with its state decoded via the states[] table.
 */
isp_dump_atpd(ispsoftc_t *isp, int chan)
	atio_private_data_t *atp, *atpool;
	const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };

	ISP_GET_PC(isp, chan, atpool, atpool);
	for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) {
		if (atp->state == ATPD_STATE_FREE)
		isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
		    chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
/*
 * Take an immediate-notify private-data struct off the channel's free
 * list.  NOTE(review): the NULL-check branch and return are elided.
 */
static inot_private_data_t *
isp_get_ntpd(ispsoftc_t *isp, int chan)
	struct ntpdlist *ntfree;
	inot_private_data_t *ntp;

	ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree);
	ntp = STAILQ_FIRST(ntfree);
		STAILQ_REMOVE_HEAD(ntfree, next);
/*
 * Linear scan of the channel's INOT pool for the entry matching
 * (tag_id, seq_id).  NOTE(review): match return / NULL tail elided.
 */
static inot_private_data_t *
isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
	inot_private_data_t *ntp, *ntp2;

	ISP_GET_PC(isp, chan, ntpool, ntp);
	ISP_GET_PC_ADDR(isp, chan, ntpool[ATPDPSIZE], ntp2);
	for (; ntp < ntp2; ntp++) {
		if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
/*
 * Return an INOT private-data struct to the channel's free list,
 * clearing its identifying tags first.
 */
isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
	struct ntpdlist *ntfree;

	ntp->tag_id = ntp->seq_id = 0;
	ISP_GET_PC_ADDR(isp, chan, ntfree, ntfree);
	STAILQ_INSERT_HEAD(ntfree, ntp, next);
/*
 * Allocate and initialize target-mode LUN state for the path's LUN and
 * link it into the per-channel LUN hash.  Returns CAM_RESRC_UNAVAIL on
 * allocation failure and CAM_REQ_CMP on success.
 */
create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path, tstate_t **rslt)
	lun = xpt_path_lun_id(path);
	tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
		return (CAM_RESRC_UNAVAIL);
	SLIST_INIT(&tptr->atios);
	SLIST_INIT(&tptr->inots);
	STAILQ_INIT(&tptr->restart_queue);
	ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(lun)], lhp);
	SLIST_INSERT_HEAD(lhp, tptr, next);
	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
	return (CAM_REQ_CMP);
/*
 * Tear down a LUN's target-mode state: abort every queued ATIO and
 * INOT CCB, bounce deferred commands back to the initiator with BUSY
 * status, unhash the state, and free it.
 */
destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
	inot_private_data_t *ntp;

	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
		SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
		SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
		isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
		STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
		isp_put_ntpd(isp, bus, ntp);
	ISP_GET_PC_ADDR(isp, bus, lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], lhp);
	SLIST_REMOVE(lhp, tptr, tstate, next);
	free(tptr, M_DEVBUF);
/*
 * Enable a LUN for target mode.  Only fully wildcarded or fully
 * specified target/lun pairs are accepted, and the LUN must not
 * already be enabled.  Completion status is set in the CCB.
 */
isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
	/*
	 * We only support either target and lun both wildcard
	 * or target and lun both non-wildcard.
	 */
	bus = XS_CHANNEL(ccb);
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
	    "enabling lun %jx\n", (uintmax_t)lun);
	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;

	/* Create the state pointer. It should not already exist. */
	tptr = get_lun_statep(isp, bus, lun);
		ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
	ccb->ccb_h.status = create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
	ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * Disable a target-mode LUN: validate the target/lun pair (same
 * wildcard rule as isp_enable_lun), find its state, and destroy it.
 * Completion status is set in the CCB.
 */
isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
	tstate_t *tptr = NULL;

	bus = XS_CHANNEL(ccb);
	target = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
	    "disabling lun %jx\n", (uintmax_t)lun);
	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
		ccb->ccb_h.status = CAM_LUN_INVALID;

	/* Find the state pointer. */
	if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
		ccb->ccb_h.status = CAM_PATH_INVALID;

	destroy_lun_state(isp, bus, tptr);
	ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * Build and issue CTIO7 IOCBs for target-mode CCBs.  The CCB is queued
 * on the per-channel wait queue (head or tail depending on 'how'), and
 * the queue is then drained: each CCB is validated against its ATIO
 * private data, a ct7_entry_t is constructed (mode 0 for data, mode 1
 * for inline status/sense, mode 2 for status with large sense), and the
 * request is handed to the DMA setup layer.  CCBs that cannot proceed
 * now (no handles, no pcmds, DMA EAGAIN, too many outstanding CTIOs)
 * are requeued at the head for a later retry.
 * NOTE(review): numerous lines (case labels, braces, continue/break
 * statements, goto labels) are elided in this view.
 */
isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
	int fctape, sendstatus, resid;
	atio_private_data_t *atp;
	struct ccb_scsiio *cso;
	struct isp_ccbq *waitq;
	uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
	ct7_entry_t local, *cto = &local;

	isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
	    (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));

	ISP_GET_PC_ADDR(isp, XS_CHANNEL(ccb), waitq, waitq);

	/*
	 * Insert at the tail of the list, if any, waiting CTIO CCBs
	 */
	TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
	case FROM_CTIO_DONE:
		/* Retries jump the queue so ordering is preserved. */
		TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);

	/* Drain the wait queue, building one CTIO per iteration. */
	while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
		TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
		xfrlen = cso->dxfer_len;
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;

		atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
			isp_dump_atpd(isp, XS_CHANNEL(ccb));
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;

		/*
		 * Is this command a dead duck?
		 */
			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
			ccb->ccb_h.status = CAM_REQ_ABORTED;

		/*
		 * Check to make sure we're still in target mode.
		 */
		fcp = FCPARAM(isp, XS_CHANNEL(ccb));
		if ((fcp->role & ISP_ROLE_TARGET) == 0) {
			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;

		/*
		 * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which
		 * could be split into two CTIOs to split data and status).
		 */
		if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
			isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);

		/*
		 * Does the initiator expect FC-Tape style responses?
		 */
		if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {

		/*
		 * If we already did the data xfer portion of a CTIO that sends data
		 * and status, don't do it again and do the status portion now.
		 */
			isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
			    cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
			xfrlen = 0;	/* we already did the data transfer */
		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
			/*
			 * Sense length is not the entire sense data structure size. Periph
			 * drivers don't seem to be setting sense_len to reflect the actual
			 * size. We'll peek inside to get the right amount.
			 */
			sense_length = cso->sense_len;

			/*
			 * This 'cannot' happen
			 */
			if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
				sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;

		/*
		 * Check for overflow
		 */
		tmp = atp->bytes_xfered + atp->bytes_in_transit;
		if (xfrlen > 0 && tmp > atp->orig_datalen) {
			isp_prt(isp, ISP_LOGERR,
			    "%s: [0x%x] data overflow by %u bytes", __func__,
			    cso->tag_id, tmp + xfrlen - atp->orig_datalen);
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		/* Clamp the transfer length to what the command asked for. */
		if (xfrlen > atp->orig_datalen - tmp) {
			xfrlen = atp->orig_datalen - tmp;
			if (xfrlen == 0 && !sendstatus) {
				cso->resid = cso->dxfer_len;
				ccb->ccb_h.status = CAM_REQ_CMP;

		/* Common CTIO7 header fields from the ATIO private data. */
		memset(cto, 0, QENTRY_LEN);
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
		ATPD_SET_SEQNO(cto, atp);
		cto->ct_nphdl = atp->nphdl;
		cto->ct_rxid = atp->tag;
		cto->ct_iid_lo = atp->sid;
		cto->ct_iid_hi = atp->sid >> 16;
		cto->ct_oxid = atp->oxid;
		cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
		cto->ct_timeout = XS_TIME(ccb);
		cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;

		/*
		 * Mode 1, status, no data. Only possible when we are sending status, have
		 * no data to transfer, and any sense data can fit into a ct7_entry_t.
		 *
		 * Mode 2, status, no data. We have to use this in the case that
		 * the sense data won't fit into a ct7_entry_t.
		 */
		if (sendstatus && xfrlen == 0) {
			cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
			resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
			if (sense_length <= MAXRESPLEN_24XX) {
				/* Mode 1: status/sense inline in the IOCB. */
				cto->ct_flags |= CT7_FLAG_MODE1;
				cto->ct_scsi_status = cso->scsi_status;
					cto->ct_resid = -resid;
					cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
				} else if (resid > 0) {
					cto->ct_resid = resid;
					cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
					cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
					cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
					memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
				/* Mode 2: response IU lives in an external ecmd buffer. */
				if (atp->ests == NULL) {
					atp->ests = isp_get_ecmd(isp);
					if (atp->ests == NULL) {
						TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
				memset(&rp, 0, sizeof(rp));
					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
					rp.fcp_rsp_bits |= FCP_CONF_REQ;
				cto->ct_flags |= CT7_FLAG_MODE2;
				rp.fcp_rsp_scsi_status = cso->scsi_status;
					rp.fcp_rsp_resid = -resid;
					rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
				} else if (resid > 0) {
					rp.fcp_rsp_resid = resid;
					rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
					rp.fcp_rsp_snslen = sense_length;
					cto->ct_senselen = sense_length;
					rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
					memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
				if (isp->isp_dblev & ISP_LOGTDEBUG1) {
					isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
				/* Compute the DMA address of the external response IU. */
				bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
				addr = isp->isp_osinfo.ecmd_dma;
				addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
				isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
				    (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
				cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
				cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
				cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
				cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
				isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
				    cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
				isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
			atp->state = ATPD_STATE_LAST_CTIO;

		/*
		 * Mode 0 data transfers, *possibly* with status.
		 */
			cto->ct_flags |= CT7_FLAG_MODE0;
			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				cto->ct_flags |= CT7_DATA_IN;
				cto->ct_flags |= CT7_DATA_OUT;

			cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
			cto->rsp.m0.ct_xfrlen = xfrlen;

			/* Debug fault injection: shorten the data frame. */
			if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
				isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
				ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
				cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;

			/* Piggyback status on the final data CTIO when possible. */
				resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
				if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
					cto->ct_flags |= CT7_SENDSTATUS;
					atp->state = ATPD_STATE_LAST_CTIO;
						cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
					atp->sendst = 1;	/* send status later */
					cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
					atp->state = ATPD_STATE_CTIO;
				atp->state = ATPD_STATE_CTIO;
			isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
			    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);

		/* Allocate pcmd + handle; requeue at the head when exhausted. */
		if (isp_get_pcmd(isp, ccb)) {
			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
		handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
			isp_free_pcmd(isp, ccb);
		atp->bytes_in_transit += xfrlen;
		PISP_PCMD(ccb)->datalen = xfrlen;

		/*
		 * Call the dma setup routines for this entry (and any subsequent
		 * CTIOs) if there's data to move, and then tell the f/w it's got
		 * new things to play with. As with isp_start's usage of DMA setup,
		 * any swizzling is done in the machine dependent layer. Because
		 * of this, we put the request onto the queue area first in native
		 */
		cto->ct_syshandle = handle;
		dmaresult = ISP_DMASETUP(isp, cso, cto);
		if (dmaresult != 0) {
			isp_destroy_handle(isp, handle);
			isp_free_pcmd(isp, ccb);
			if (dmaresult == CMD_EAGAIN) {
				TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
			ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
			ccb->ccb_h.spriv_field0 = ~0;
/*
 * Callout handler that retries pushing a Notify ACK to the firmware.
 * Runs with the softc lock held (asserted below); if isp_notify_ack()
 * returns nonzero the ACK could not be queued, so the callout is
 * rescheduled 5 ticks out to try again.
 * NOTE(review): the leading "13xx" digits on each line look like stray
 * line numbers from an extraction pass, and the success/else arm of the
 * if appears elided — compare against upstream isp_freebsd.c.
 */
1330 isp_refire_notify_ack(void *arg)
1332 isp_tna_t *tp = arg;
1333 ispsoftc_t *isp = tp->isp;
1335 ISP_ASSERT_LOCKED(isp);
/* Could not queue the ACK now — retry shortly via the same callout. */
1336 if (isp_notify_ack(isp, tp->not)) {
1337 callout_schedule(&tp->timer, 5);
/*
 * Finish a target-mode CTIO transaction: re-check request-queue
 * watermarks and clear CAM_SIM_QUEUED before the CCB is handed back
 * to CAM.  NOTE(review): the xpt_done() tail of this function appears
 * elided in this extraction — verify against upstream.
 */
1345 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1348 isp_rq_check_below(isp);
1349 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/*
 * Handle an inbound ATIO7 (Accept Target I/O, 24XX format) from the
 * firmware: locate the channel and initiator, find the LUN state,
 * claim a free CAM ATIO CCB plus a private atp adjunct, copy the CDB
 * and task attributes in, and hand the ATIO to CAM via xpt_done().
 * Resource-exhaustion paths either return SCSI BUSY to the initiator
 * or queue the raw entry on the restart queue for later replay.
 * NOTE(review): many structural lines (braces, breaks, returns) are
 * elided in this extraction and each line carries a stray "1xxx"
 * number — compare with upstream before editing logic.
 */
1354 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1358 uint16_t chan, nphdl = NIL_HANDLE;
1362 struct ccb_accept_tio *atiop;
1363 atio_private_data_t *atp = NULL;
1364 atio_private_data_t *oatp;
1365 inot_private_data_t *ntp;
/* D_ID/S_ID are 24-bit FC addresses packed big-endian in the header. */
1367 did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1368 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1369 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1371 if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1372 /* Channel has to be derived from D_ID */
1373 isp_find_chan_by_did(isp, did, &chan);
1374 if (chan == ISP_NOCHAN) {
1375 isp_prt(isp, ISP_LOGWARN,
1376 "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1377 __func__, aep->at_rxid, did);
1378 isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1387 * Find the PDB entry for this initiator
1389 if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1391 * If we're not in the port database terminate the exchange.
1393 isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1394 __func__, aep->at_rxid, did, chan, sid);
1395 isp_dump_portdb(isp, chan);
1396 isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1402 * Get the tstate pointer
1404 tptr = get_lun_statep(isp, chan, lun);
/* No per-LUN state: fall back to the wildcard LUN listener. */
1406 tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1408 isp_prt(isp, ISP_LOGWARN,
1409 "%s: [0x%x] no state pointer for lun %jx or wildcard",
1410 __func__, aep->at_rxid, (uintmax_t)lun);
1412 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
/* CHECK CONDITION with sense 5/25h (LOGICAL UNIT NOT SUPPORTED). */
1414 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1421 * Start any commands pending resources first.
1423 if (isp_atio_restart(isp, chan, tptr))
1427 * If the f/w is out of resources, just send a BUSY status back.
1429 if (aep->at_rxid == AT7_NORESRC_RXID) {
1430 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1435 * If we're out of resources, just send a BUSY status back.
1437 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1438 if (atiop == NULL) {
1439 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
/* RX_ID (tag) reuse while the old exchange is still tracked. */
1443 oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1445 isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1446 ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1447 "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1448 aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1450 * It's not a "no resource" condition- but we can treat it like one
1454 atp = isp_get_atpd(isp, chan, aep->at_rxid);
1456 isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1457 isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
/* Populate the adjunct and the CAM ATIO from the firmware entry. */
1460 atp->word3 = lp->prli_word3;
1461 atp->state = ATPD_STATE_ATIO;
1462 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1463 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1464 atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1465 atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1466 atiop->ccb_h.target_lun = lun;
1467 atiop->sense_len = 0;
1468 cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
/* Extended CDB lengths beyond the inline CDB field are dropped. */
1470 isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1472 cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1473 ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1474 atiop->cdb_len = cdbxlen;
1475 atiop->ccb_h.status = CAM_CDB_RECVD;
1476 atiop->tag_id = atp->tag;
/* Map FCP task attributes onto CAM tag actions. */
1477 switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1478 case FCP_CMND_TASK_ATTR_SIMPLE:
1479 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1480 atiop->tag_action = MSG_SIMPLE_TASK;
1482 case FCP_CMND_TASK_ATTR_HEAD:
1483 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1484 atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1486 case FCP_CMND_TASK_ATTR_ORDERED:
1487 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1488 atiop->tag_action = MSG_ORDERED_TASK;
1490 case FCP_CMND_TASK_ATTR_ACA:
1491 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1492 atiop->tag_action = MSG_ACA_TASK;
1494 case FCP_CMND_TASK_ATTR_UNTAGGED:
1496 atiop->tag_action = 0;
1499 atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1500 FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1501 atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1502 atp->bytes_xfered = 0;
1507 atp->oxid = aep->at_hdr.ox_id;
1508 atp->rxid = aep->at_hdr.rx_id;
1509 atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1510 atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1511 atp->state = ATPD_STATE_CAM;
1512 isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1513 aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1514 xpt_done((union ccb *)atiop);
/*
 * "no resource" path: stash the raw queue entry on the restart queue
 * (no atp may be held here) or return BUSY if no ntp is available.
 */
1517 KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1518 ntp = isp_get_ntpd(isp, chan);
1520 isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1523 memcpy(ntp->data, aep, QENTRY_LEN);
1524 STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1529 * Handle starting an SRR (sequence retransmit request)
1530 * We get here when we've gotten the immediate notify
1531 * and the return of all outstanding CTIOs for this
/*
 * Restart the exchange at the offset the initiator requested.  The
 * stashed immediate notify (atp->srr) tells us which IU to resend;
 * the saved CCB (atp->srr_ccb) is what we replay or fail.  For data
 * SRRs the requested offset must fall inside the current CCB's data
 * window or we reject.  The notify is acked synchronously so the ACK
 * hits the wire before the replayed CTIO.
 * NOTE(review): several braces/breaks/returns are elided in this
 * extraction; the failure paths below (notify-ack reject, MDP message
 * build) belong to distinct branches in the upstream source.
 */
1535 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1537 in_fcentry_24xx_t *inot;
1538 uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1541 inot = (in_fcentry_24xx_t *)atp->srr;
/* Relative offset the initiator wants us to restart from (lo|hi<<16). */
1542 srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1544 atp->srr_ccb = NULL;
1547 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
/* Data window of the CCB currently in flight for this exchange. */
1551 ccb_off = ccb->ccb_h.spriv_field0;
1552 ccb_len = ccb->csio.dxfer_len;
1553 ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1555 switch (inot->in_srr_iu) {
1556 case R_CTL_INFO_SOLICITED_DATA:
1558 * We have to restart a FCP_DATA data out transaction
1561 atp->bytes_xfered = srr_off;
1563 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1566 if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) {
1567 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1570 isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1572 case R_CTL_INFO_COMMAND_STATUS:
1573 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1576 * We have to restart a FCP_RSP IU transaction
1579 case R_CTL_INFO_DATA_DESCRIPTOR:
1581 * We have to restart an FCP DATA in transaction
/* FCP DATA IN SRRs are not supported — drop the request. */
1583 isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1587 isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1592 * We can't do anything until this is acked, so we might as well start it now.
1593 * We aren't going to do the usual asynchronous ack issue because we need
1594 * to make sure this gets on the wire first.
1596 if (isp_notify_ack(isp, inot)) {
1597 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
/* ACK on the wire — replay the saved CCB from the SRR entry point. */
1600 isp_target_start_ctio(isp, ccb, FROM_SRR);
/* Failure path: NAK the notify and fail the CCB back to CAM. */
1603 inot->in_reserved = 1;
1604 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1605 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1606 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1607 isp_complete_ctio(isp, ccb);
1610 if (isp_notify_ack(isp, inot)) {
1611 isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
/* Report the offset change to CAM as a MODIFY DATA POINTER message. */
1614 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1615 ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1617 * This is not a strict interpretation of MDP, but it's close
1619 ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1620 ccb->csio.msg_len = 7;
1621 ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1622 ccb->csio.msg_ptr[1] = 5;
1623 ccb->csio.msg_ptr[2] = 0; /* modify data pointer */
1624 ccb->csio.msg_ptr[3] = srr_off >> 24;
1625 ccb->csio.msg_ptr[4] = srr_off >> 16;
1626 ccb->csio.msg_ptr[5] = srr_off >> 8;
1627 ccb->csio.msg_ptr[6] = srr_off;
1628 isp_complete_ctio(isp, ccb);
/*
 * Entry point for an SRR immediate notify from the firmware: find the
 * command adjunct by tag, record that an SRR notify arrived, stash the
 * raw notify in the adjunct, and kick off SRR processing.  If no
 * adjunct matches, the notify is simply acked and dropped.
 */
1633 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1635 in_fcentry_24xx_t *inot = notify->nt_lreserved;
1636 atio_private_data_t *atp;
1637 uint32_t tag = notify->nt_tagval & 0xffffffff;
1639 atp = isp_find_atpd(isp, notify->nt_channel, tag);
1641 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
/* Unknown tag: ack the notify so the firmware can move on. */
1643 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1646 atp->srr_notify_rcvd = 1;
1647 memcpy(atp->srr, inot, sizeof (atp->srr));
1648 isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1649 inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1650 ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1652 isp_handle_srr_start(isp, atp);
/*
 * Process a completed CTIO7 from the firmware: recover the CCB via the
 * system handle, release handle/PCMD resources, find the command
 * adjunct, account for transferred bytes, translate the firmware
 * completion code into CAM status, and either keep the exchange going
 * (more CTIOs / deferred status) or complete it back to CAM.
 * NOTE(review): several braces/returns are elided in this extraction;
 * the CT7_SRR, CT_HBA_RESET and normal-completion arms are separate
 * branches in the upstream source.
 */
1656 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1659 int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1660 atio_private_data_t *atp = NULL;
1662 uint32_t handle, data_requested, resid;
1664 handle = ct->ct_syshandle;
1665 ccb = isp_find_xs(isp, handle);
1667 isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
/* Handle and per-command DMA/callout resources are done with. */
1670 isp_destroy_handle(isp, handle);
1671 resid = data_requested = PISP_PCMD(ccb)->datalen;
1672 isp_free_pcmd(isp, ccb);
1674 bus = XS_CHANNEL(ccb);
1675 atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1678 * XXX: isp_clear_commands() generates fake CTIO with zero
1679 * ct_rxid value, filling only ct_syshandle. Workaround
1680 * that using tag_id from the CCB, pointed by ct_syshandle.
1682 atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1685 isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1688 KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1689 atp->bytes_in_transit -= data_requested;
1691 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
/* SRR completion: restart SRR handling if the notify already arrived. */
1693 if (ct->ct_nphdl == CT7_SRR) {
1695 if (atp->srr_notify_rcvd)
1696 isp_handle_srr_start(isp, atp);
/* HBA reset: fail the exchange with an unrecoverable HBA error. */
1699 if (ct->ct_nphdl == CT_HBA_RESET) {
1700 sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1702 failure = CAM_UNREC_HBA_ERROR;
1704 sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1705 ok = (ct->ct_nphdl == CT7_OK);
1706 notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1707 if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1708 resid = ct->ct_resid;
1710 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1711 notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
/* Account actual bytes moved against the CCB and the exchange total. */
1713 if (data_requested > 0) {
1714 atp->bytes_xfered += data_requested - resid;
1715 ccb->csio.resid = ccb->csio.dxfer_len -
1716 (data_requested - resid);
1718 if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1719 ccb->ccb_h.status |= CAM_SENT_SENSE;
1720 ccb->ccb_h.status |= CAM_REQ_CMP;
1723 if (failure == CAM_UNREC_HBA_ERROR)
1724 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1726 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1728 atp->state = ATPD_STATE_PDON;
1731 * We never *not* notify CAM when there has been any error (ok == 0),
1732 * so we never need to do an ATIO putback if we're not notifying CAM.
1734 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1735 (sentstatus)? " FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
/* Mid-exchange CTIO: keep feeding the firmware, don't tell CAM yet. */
1736 if (notify_cam == 0) {
1738 isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1744 * We are done with this ATIO if we successfully sent status.
1745 * In all other cases expect either another CTIO or XPT_ABORT.
1747 if (ok && sentstatus)
1748 isp_put_atpd(isp, bus, atp);
1751 * We're telling CAM we're done with this CTIO transaction.
1753 * 24XX cards never need an ATIO put back.
1755 isp_complete_ctio(isp, ccb);
/*
 * Acknowledge a target-mode notify.  Depending on what the notify
 * wrapped, this builds and queues the appropriate response:
 *  - TMF delivered as an ATIO7: a mode-1 CTIO7 carrying a 4-byte FCP
 *    response code (rsp) back to the initiator;
 *  - ABTS: a terminating CTIO7 for the aborted exchange, then the
 *    ABTS ACK/NAK itself;
 *  - logout notifies: drop the affected WWN entries;
 *  - anything else that still needs an ack: a plain notify ack.
 * NOTE(review): branch boundaries (returns/braces) are elided in this
 * extraction; also line "1803" passes &cto where sibling code passes
 * cto — verify against upstream before assuming either is correct.
 */
1759 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1761 ct7_entry_t local, *cto = &local;
1763 if (isp->isp_state != ISP_RUNSTATE) {
1764 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1769 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1771 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1772 at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1777 sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1778 if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
/* Build a mode-1 (response IU) CTIO7 carrying the TMF response code. */
1783 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1784 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1785 cto->ct_header.rqs_entry_count = 1;
1786 cto->ct_nphdl = nphdl;
1787 cto->ct_rxid = aep->at_rxid;
1788 cto->ct_vpidx = mp->nt_channel;
1789 cto->ct_iid_lo = sid;
1790 cto->ct_iid_hi = sid >> 16;
1791 cto->ct_oxid = aep->at_hdr.ox_id;
1792 cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1793 cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1795 cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
/* 4-byte FCP response: rsp serialized little-end-first into ct_resp. */
1796 cto->rsp.m1.ct_resplen = 4;
1797 ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1798 cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1799 cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1800 cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1801 cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1803 return (isp_target_put_entry(isp, &cto));
1807 * This case is for a responding to an ABTS frame
1809 if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1812 * Overload nt_need_ack here to mark whether we've terminated the associated command.
1814 if (mp->nt_need_ack) {
1815 abts_t *abts = (abts_t *)mp->nt_lreserved;
/* First terminate the exchange named by the ABTS, then ack/nak it. */
1817 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1818 isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
1819 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1820 cto->ct_header.rqs_entry_count = 1;
1821 cto->ct_nphdl = mp->nt_nphdl;
1822 cto->ct_rxid = abts->abts_rxid_task;
1823 cto->ct_iid_lo = mp->nt_sid;
1824 cto->ct_iid_hi = mp->nt_sid >> 16;
1825 cto->ct_oxid = abts->abts_ox_id;
1826 cto->ct_vpidx = mp->nt_channel;
1827 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
1828 if (isp_target_put_entry(isp, cto)) {
1831 mp->nt_need_ack = 0;
1833 if (isp_acknak_abts(isp, mp->nt_lreserved, 0) == ENOMEM) {
1841 * Handle logout cases here
1843 if (mp->nt_ncode == NT_GLOBAL_LOGOUT) {
1844 isp_del_all_wwn_entries(isp, mp->nt_channel);
1847 if (mp->nt_ncode == NT_LOGOUT)
1848 isp_del_wwn_entries(isp, mp);
1851 * General purpose acknowledgement
1853 if (mp->nt_need_ack) {
1854 isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1856 * Don't need to use the guaranteed send because the caller can retry
1858 return (isp_notify_ack(isp, mp->nt_lreserved));
1864 * Handle task management functions.
1866 * We show up here with a notify structure filled out.
1868 * The nt_lreserved tag points to the original queue entry
/*
 * Deliver a TMF to CAM as an immediate-notify CCB: resolve the LUN
 * (possibly via the command adjunct for tag-addressed TMFs), find the
 * listener's tstate, claim a free INOT CCB, fill in initiator/tag
 * info and the CAM message code for the TMF, save the notify in an
 * ntp so the eventual XPT_NOTIFY_ACKNOWLEDGE can be matched back up,
 * and complete the INOT to CAM.  On resource failure the notify is
 * acked/naked directly.  NOTE(review): case labels/breaks and the
 * "bad" exit path are partially elided in this extraction.
 */
1871 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1875 struct ccb_immediate_notify *inot;
1876 inot_private_data_t *ntp = NULL;
1877 atio_private_data_t *atp;
1880 isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid 0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1881 notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
/* LUN_ANY: derive the LUN from the tagged command if there is one. */
1882 if (notify->nt_lun == LUN_ANY) {
1883 if (notify->nt_tagval == TAG_ANY) {
1884 lun = CAM_LUN_WILDCARD;
1886 atp = isp_find_atpd(isp, notify->nt_channel,
1887 notify->nt_tagval & 0xffffffff);
1888 lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1891 lun = notify->nt_lun;
1893 tptr = get_lun_statep(isp, notify->nt_channel, lun);
1895 tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1897 isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1901 inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1903 isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1907 inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1908 inot->ccb_h.target_lun = lun;
/* Unknown initiator (not in PDB by port id or handle) → wildcard. */
1909 if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1910 isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1911 inot->initiator_id = CAM_TARGET_WILDCARD;
1913 inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1915 inot->seq_id = notify->nt_tagval;
1916 inot->tag_id = notify->nt_tagval >> 32;
/* Translate the notify code into the CAM TMF message for inot->arg. */
1918 switch (notify->nt_ncode) {
1920 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1921 inot->arg = MSG_ABORT_TASK;
1923 case NT_ABORT_TASK_SET:
1924 isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1925 inot->arg = MSG_ABORT_TASK_SET;
1928 inot->arg = MSG_CLEAR_ACA;
1930 case NT_CLEAR_TASK_SET:
1931 inot->arg = MSG_CLEAR_TASK_SET;
1934 inot->arg = MSG_LOGICAL_UNIT_RESET;
1936 case NT_TARGET_RESET:
1937 inot->arg = MSG_TARGET_RESET;
1939 case NT_QUERY_TASK_SET:
1940 inot->arg = MSG_QUERY_TASK_SET;
1942 case NT_QUERY_ASYNC_EVENT:
1943 inot->arg = MSG_QUERY_ASYNC_EVENT;
1946 isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
/* Keep a copy of the notify so the later NOTIFY ACK can be matched. */
1950 ntp = isp_get_ntpd(isp, notify->nt_channel);
1952 isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1955 ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1956 if (notify->nt_lreserved) {
1957 ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1958 ntp->nt.nt_lreserved = &ntp->data;
1960 ntp->seq_id = notify->nt_tagval;
1961 ntp->tag_id = notify->nt_tagval >> 32;
1963 SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1964 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1965 inot->ccb_h.status = CAM_MESSAGE_RECV;
1966 xpt_done((union ccb *)inot);
/* Failure path: ack/nak the notify ourselves since CAM never sees it. */
1969 if (notify->nt_need_ack) {
1970 if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1971 if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1972 isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1975 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
/*
 * Early-abort handling for ABORT TASK / ABORT TASK SET: terminate any
 * matching commands still parked on the restart queue (they never
 * reached CAM), then walk the channel's atp pool and mark live
 * commands on this LUN as dead.  tag_id == TAG_ANY matches everything.
 * NOTE(review): the isp_endcmd() argument list and the "mark dead"
 * statement are partially elided in this extraction.
 */
1981 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1983 atio_private_data_t *atp, *atpool;
1984 inot_private_data_t *ntp, *tmp;
1985 uint32_t this_tag_id;
1988 * First, clean any commands pending restart
1990 STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1991 this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1992 if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1993 isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1995 isp_put_ntpd(isp, chan, ntp);
1996 STAILQ_REMOVE(&tptr->restart_queue, ntp,
1997 inot_private_data, next);
2002 * Now mark other ones dead as well.
2004 ISP_GET_PC(isp, chan, atpool, atpool);
2005 for (atp = atpool; atp < &atpool[ATPDPSIZE]; atp++) {
/* Only touch commands belonging to this tstate's LUN. */
2006 if (atp->lun != tptr->ts_lun)
2008 if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
/*
 * CAM SIM poll entry point — recovers the softc from the SIM.
 * NOTE(review): the body (interrupt-service call) is elided in this
 * extraction.
 */
2015 isp_poll(struct cam_sim *sim)
2017 ispsoftc_t *isp = cam_sim_softc(sim);
/*
 * Per-command watchdog (callout) fired when an initiator-mode command
 * times out.  It re-runs interrupt processing to make sure the command
 * is not merely stuck in the response queue, then attempts a firmware
 * abort; only if the command is provably dead does it free the DMA
 * resources and handle and complete the CCB with CAM_CMD_TIMEOUT.
 * NOTE(review): several returns/braces and the ohandle bookkeeping
 * between the two isp_find_handle() calls are elided here.
 */
2024 isp_watchdog(void *arg)
2026 struct ccb_scsiio *xs = arg;
2028 uint32_t ohandle = ISP_HANDLE_FREE, handle;
2032 handle = isp_find_handle(isp, xs);
2035 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
2037 if (handle != ISP_HANDLE_FREE) {
2040 handle = isp_find_handle(isp, xs);
2042 if (handle != ISP_HANDLE_FREE) {
2044 * Try and make sure the command is really dead before
2045 * we release the handle (and DMA resources) for reuse.
2047 * If we are successful in aborting the command then
2048 * we're done here because we'll get the command returned
2051 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2056 * Note that after calling the above, the command may in
2057 * fact have been completed.
2059 xs = isp_find_xs(isp, handle);
2062 * If the command no longer exists, then we won't
2063 * be able to find the xs again with this handle.
2070 * After this point, the command is really dead.
2072 ISP_DMAFREE(isp, xs);
2073 isp_destroy_handle(isp, handle);
2074 isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
2075 XS_SETERR(xs, CAM_CMD_TIMEOUT);
/* Command completed during the hand-cranked interrupt pass. */
2078 if (ohandle != ISP_HANDLE_FREE) {
2079 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
2081 isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
/*
 * Announce a newly-arrived FC target to CAM by allocating a CCB,
 * building a wildcard-LUN path for the target, and scheduling a
 * rescan.  Allocation/path failures are logged and the announcement
 * is dropped.  NOTE(review): the xpt_rescan() tail and cleanup are
 * elided in this extraction.
 */
2087 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2090 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2093 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
2095 ccb = xpt_alloc_ccb_nowait();
2097 isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
2100 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
2101 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2102 isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
/*
 * Tell CAM that an FC target has departed: build a wildcard-LUN path
 * for the target and post an AC_LOST_DEVICE async event on it.
 * NOTE(review): the xpt_free_path() call after the async appears
 * elided in this extraction.
 */
2110 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2112 struct cam_path *tp;
2113 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2115 if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2116 xpt_async(AC_LOST_DEVICE, tp, NULL);
2122 * Gone Device Timer Function- when we have decided that a device has gone
2123 * away, we wait a specific period of time prior to telling the OS it has
2126 * This timer function fires once a second and then scans the port database
2127 * for devices that are marked dead but still have a virtual target assigned.
2128 * We decrement a counter for that port database entry, and when it hits zero,
2129 * we tell the OS the device has gone away.
/*
 * Callout body: defer the real work to a taskqueue so the scan runs
 * in a sleepable context.  NOTE(review): the isp_gdt() function header
 * line itself appears elided in this extraction.
 */
2134 struct isp_fc *fc = arg;
2135 taskqueue_enqueue(taskqueue_thread, &fc->gtask);
/*
 * Taskqueue half of the Gone Device Timer: scan the channel's port
 * database for zombie entries, count down each entry's gone_timer,
 * and when one expires notify CAM (AC_LOST_DEVICE for targets,
 * AC_CONTRACT/DEV_CHG for initiators) and reset the entry to NIL.
 * Reschedules itself every second while any zombie remains; otherwise
 * deactivates the callout.  NOTE(review): several braces/continues
 * are elided in this extraction.
 */
2139 isp_gdt_task(void *arg, int pending)
2141 struct isp_fc *fc = arg;
2142 ispsoftc_t *isp = fc->isp;
/* Recover our channel index from the position in the per-channel array. */
2143 int chan = fc - isp->isp_osinfo.pc.fc;
2145 struct ac_contract ac;
2146 struct ac_device_changed *adc;
2147 int dbidx, more_to_do = 0;
2150 isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2151 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2152 lp = &FCPARAM(isp, chan)->portdb[dbidx];
2154 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2157 if (lp->gone_timer != 0) {
2158 lp->gone_timer -= 1;
/* Timer expired: the device is now officially gone. */
2162 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2163 if (lp->is_target) {
2165 isp_make_gone(isp, lp, chan, dbidx);
2167 if (lp->is_initiator) {
2168 lp->is_initiator = 0;
2169 ac.contract_number = AC_CONTRACT_DEV_CHG;
2170 adc = (struct ac_device_changed *) ac.contract_data;
2171 adc->wwpn = lp->port_wwn;
2172 adc->port = lp->portid;
2173 adc->target = dbidx;
2175 xpt_async(AC_CONTRACT, fc->path, &ac);
2177 lp->state = FC_PORTDB_STATE_NIL;
/* Re-arm once a second while zombies remain; otherwise stop. */
2181 callout_reset(&fc->gdt, hz, isp_gdt, fc);
2183 callout_deactivate(&fc->gdt);
2184 isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2191 * When loop goes down we remember the time and freeze CAM command queue.
2192 * During some time period we are trying to reprobe the loop. But if we
2193 * fail, we tell the OS that devices have gone away and drop the freeze.
2195 * We don't clear the devices out of our port database because, when loop
2196 * come back up, we have to do some actual cleanup with the chip at that
2197 * point (implicit PLOGO, e.g., to get the chip's port database state right).
/*
 * Record a loop state change: the first change freezes the SIM queue
 * (initiator role only) and timestamps the down event; subsequent
 * changes while already down are ignored via the loop_down_time guard.
 */
2200 isp_loop_changed(ispsoftc_t *isp, int chan)
2202 fcparam *fcp = FCPARAM(isp, chan);
2203 struct isp_fc *fc = ISP_FC_PC(isp, chan);
/* Already marked down — nothing more to record. */
2205 if (fc->loop_down_time)
2207 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2208 if (fcp->role & ISP_ROLE_INITIATOR)
2209 isp_freeze_loopdown(isp, chan);
2210 fc->loop_down_time = time_uptime;
/*
 * Loop came (back) up: note that we have seen loop at least once,
 * clear the down timestamp, and release the loop-down SIM freeze.
 */
2215 isp_loop_up(ispsoftc_t *isp, int chan)
2217 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2219 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2220 fc->loop_seen_once = 1;
2221 fc->loop_down_time = 0;
2222 isp_unfreeze_loopdown(isp, chan);
/*
 * Give up on a downed loop: for every active port database entry,
 * abort (or forcibly destroy) any initiator commands still in flight
 * to that target, announce the device's departure to CAM, and finally
 * drop the loop-down freeze.  Mirrors isp_watchdog's abort-then-
 * destroy strategy for stuck handles.  NOTE(review): several braces/
 * continues and the entry-state reset are elided in this extraction.
 */
2226 isp_loop_dead(ispsoftc_t *isp, int chan)
2228 fcparam *fcp = FCPARAM(isp, chan);
2229 struct isp_fc *fc = ISP_FC_PC(isp, chan);
2231 struct ac_contract ac;
2232 struct ac_device_changed *adc;
2235 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2238 * Notify to the OS all targets who we now consider have departed.
2240 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2241 lp = &fcp->portdb[dbidx];
2243 if (lp->state == FC_PORTDB_STATE_NIL)
/* Sweep the handle list for initiator commands aimed at this target. */
2246 for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2247 struct ccb_scsiio *xs;
2249 if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2252 if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2255 if (dbidx != XS_TGT(xs)) {
2258 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2259 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2260 (uintmax_t)XS_LUN(xs));
2263 * Just like in isp_watchdog, abort the outstanding
2264 * command or immediately free its resources if it is
2267 if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2271 ISP_DMAFREE(isp, xs);
2272 isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2273 isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2274 isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2275 (uintmax_t)XS_LUN(xs));
2276 XS_SETERR(xs, HBA_BUSRESET);
2280 isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2281 if (lp->is_target) {
2283 isp_make_gone(isp, lp, chan, dbidx);
2285 if (lp->is_initiator) {
2286 lp->is_initiator = 0;
2287 ac.contract_number = AC_CONTRACT_DEV_CHG;
2288 adc = (struct ac_device_changed *) ac.contract_data;
2289 adc->wwpn = lp->port_wwn;
2290 adc->port = lp->portid;
2291 adc->target = dbidx;
2293 xpt_async(AC_CONTRACT, fc->path, &ac);
/* All departures announced — release the queue and clear the stamp. */
2297 isp_unfreeze_loopdown(isp, chan);
2298 fc->loop_down_time = 0;
/*
 * Per-channel FC state-machine kernel thread: repeatedly drive
 * isp_fc_runstate() toward LOOP_READY, deciding between declaring the
 * loop up, declaring it dead (after quickboot/loop-down limits), or
 * sleeping and retrying.  Exits when isp_osinfo.is_exiting is set.
 * NOTE(review): the wait/limit decision branches and the sleep-time
 * computation are partially elided in this extraction.
 */
2302 isp_kthread(void *arg)
2304 struct isp_fc *fc = arg;
2305 ispsoftc_t *isp = fc->isp;
/* Channel index from position within the per-channel array. */
2306 int chan = fc - isp->isp_osinfo.pc.fc;
2311 while (isp->isp_osinfo.is_exiting == 0) {
2312 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2313 "Chan %d Checking FC state", chan);
2314 lb = isp_fc_runstate(isp, chan, 250000);
2315 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2316 "Chan %d FC got to %s state", chan,
2317 isp_fc_loop_statename(lb));
2320 * Our action is different based upon whether we're supporting
2321 * Initiator mode or not. If we are, we might freeze the simq
2322 * when loop is down and set all sorts of different delays to
2325 * If not, we simply just wait for loop to come up.
2327 if (lb == LOOP_READY || lb < 0) {
2331 * If we've never seen loop up and we've waited longer
2332 * than quickboot time, or we've seen loop up but we've
2333 * waited longer than loop_down_limit, give up and go
2334 * to sleep until loop comes up.
2336 if (fc->loop_seen_once == 0)
2337 lim = isp_quickboot_time;
2339 lim = fc->loop_down_limit;
2340 d = time_uptime - fc->loop_down_time;
2356 if (lb == LOOP_READY)
2357 isp_loop_up(isp, chan);
2359 isp_loop_dead(isp, chan);
2362 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2363 "Chan %d sleep for %d seconds", chan, slp);
2364 msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
/* Thread is exiting — drop our count so detach can proceed. */
2366 fc->num_threads -= 1;
2371 #ifdef ISP_TARGET_MODE
/*
 * XPT_ABORT handling for an ATIO CCB.  If the target ATIO is still on
 * the free/queued list it is simply unlinked and completed as aborted;
 * if it is already live (has an atp adjunct) a CT7_TERMINATE CTIO7 is
 * queued to the firmware and the adjunct is released.  Fails with
 * CAM_UA_ABORT when the ATIO cannot be found at all.
 * NOTE(review): loop/branch closures are elided in this extraction.
 */
2373 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2375 atio_private_data_t *atp;
2376 union ccb *accb = ccb->cab.abort_ccb;
2377 struct ccb_hdr *sccb;
2380 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2382 /* Search for the ATIO among queueued. */
2383 SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2384 if (sccb != &accb->ccb_h)
2386 SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2387 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2388 "Abort FREE ATIO\n");
2389 accb->ccb_h.status = CAM_REQ_ABORTED;
2391 ccb->ccb_h.status = CAM_REQ_CMP;
2396 /* Search for the ATIO among running. */
2397 atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
2399 /* Send TERMINATE to firmware. */
2401 uint8_t storage[QENTRY_LEN];
2402 ct7_entry_t *cto = (ct7_entry_t *) storage;
/* Build a no-ack terminate CTIO7 from the adjunct's exchange ids. */
2404 ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2405 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2406 cto->ct_header.rqs_entry_count = 1;
2407 cto->ct_nphdl = atp->nphdl;
2408 cto->ct_rxid = atp->tag;
2409 cto->ct_iid_lo = atp->sid;
2410 cto->ct_iid_hi = atp->sid >> 16;
2411 cto->ct_oxid = atp->oxid;
2412 cto->ct_vpidx = XS_CHANNEL(accb);
2413 cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2414 isp_target_put_entry(isp, cto);
2416 isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2417 ccb->ccb_h.status = CAM_REQ_CMP;
2419 ccb->ccb_h.status = CAM_UA_ABORT;
/*
 * XPT_ABORT handling for an immediate-notify CCB.  Queued INOTs are
 * unlinked and completed as aborted; a running one (found via its
 * ntp) has its pending notify acked if needed, then the ntp is freed.
 * Fails with CAM_UA_ABORT when the INOT cannot be found.
 * NOTE(review): loop/branch closures are elided in this extraction.
 */
2424 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2426 inot_private_data_t *ntp;
2427 union ccb *accb = ccb->cab.abort_ccb;
2428 struct ccb_hdr *sccb;
2431 tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2433 /* Search for the INOT among queueued. */
2434 SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2435 if (sccb != &accb->ccb_h)
2437 SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2438 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2439 "Abort FREE INOT\n");
2440 accb->ccb_h.status = CAM_REQ_ABORTED;
2442 ccb->ccb_h.status = CAM_REQ_CMP;
2447 /* Search for the INOT among running. */
2448 ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
/* The firmware still expects an ack for this notify — send it now. */
2450 if (ntp->nt.nt_need_ack) {
2451 isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2452 ntp->nt.nt_lreserved);
2454 isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2455 ccb->ccb_h.status = CAM_REQ_CMP;
2457 ccb->ccb_h.status = CAM_UA_ABORT;
/*
 * isp_action --
 * CAM SIM action entry point: dispatch on ccb->ccb_h.func_code and either
 * execute the request or complete it with an appropriate CAM status.
 * Runs with the isp lock held (ISP_ASSERT_LOCKED below).
 *
 * NOTE(review): this is a line-numbered listing with dropped lines; most
 * 'break' statements, closing braces and some xpt_done() calls between
 * cases are missing from view.
 */
2464 isp_action(struct cam_sim *sim, union ccb *ccb)
2466 int bus, tgt, error;
2469 struct ccb_trans_settings *cts;
2472 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2474 isp = (ispsoftc_t *)cam_sim_softc(sim);
2475 ISP_ASSERT_LOCKED(isp);
2476 bus = cam_sim_bus(sim);
2477 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2478 ISP_PCMD(ccb) = NULL;
2480 switch (ccb->ccb_h.func_code) {
/* Initiator I/O: validate the ccb, grab a pcmd, hand it to isp_start(). */
2481 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2483 * Do a couple of preliminary checks...
/* Physical-address CDB pointers are not supported. */
2485 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2486 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2487 ccb->ccb_h.status = CAM_REQ_INVALID;
2488 isp_done((struct ccb_scsiio *) ccb);
2493 if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2494 xpt_print(ccb->ccb_h.path, "invalid target\n");
2495 ccb->ccb_h.status = CAM_PATH_INVALID;
2497 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2502 ccb->csio.scsi_status = SCSI_STATUS_OK;
/* No free per-command struct: ask CAM to requeue after a short delay. */
2503 if (isp_get_pcmd(isp, ccb)) {
2504 isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2505 cam_freeze_devq(ccb->ccb_h.path);
2506 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2507 ccb->ccb_h.status = CAM_REQUEUE_REQ;
/* Dispatch to the core; the branches below handle its return code. */
2511 error = isp_start((XS_T *) ccb);
2512 isp_rq_check_above(isp);
/* presumably the CMD_QUEUED arm: mark queued and start the watchdog. */
2515 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2516 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2518 /* Give firmware extra 10s to handle timeout. */
2519 ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2520 callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2521 isp_watchdog, ccb, 0);
/* presumably CMD_RQLATER: requeue via CAM after a 1s devq freeze. */
2524 isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2525 XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2526 cam_freeze_devq(ccb->ccb_h.path);
2527 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2528 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2529 isp_free_pcmd(isp, ccb);
/* presumably CMD_EAGAIN: transient resource shortage, retry sooner. */
2533 isp_free_pcmd(isp, ccb);
2534 cam_freeze_devq(ccb->ccb_h.path);
2535 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2536 ccb->ccb_h.status = CAM_REQUEUE_REQ;
/* presumably CMD_COMPLETE: command finished immediately. */
2540 isp_done((struct ccb_scsiio *) ccb);
/* Unexpected return code from isp_start(). */
2543 isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2544 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2545 isp_free_pcmd(isp, ccb);
2550 #ifdef ISP_TARGET_MODE
/* Target mode: enable or disable a LUN. */
2551 case XPT_EN_LUN: /* Enable/Disable LUN as a target */
2552 if (ccb->cel.enable) {
2553 isp_enable_lun(isp, ccb);
2555 isp_disable_lun(isp, ccb);
/* Post an ATIO/INOT resource on the per-LUN state lists for later use. */
2558 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
2559 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2561 tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2565 if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2566 str = "XPT_IMMEDIATE_NOTIFY";
2568 str = "XPT_ACCEPT_TARGET_IO";
2569 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2570 "%s: no state pointer found for %s\n",
2572 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2577 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2578 ccb->atio.tag_id = 0;
2579 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2580 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2582 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2583 ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2584 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2585 ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2588 ccb->ccb_h.status = CAM_REQ_INPROG;
2591 case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
2593 inot_private_data_t *ntp;
2596 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2597 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2600 * All the relevant path information is in the associated immediate notify
2602 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2603 ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2605 ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2606 ccb->cna2.tag_id, ccb->cna2.seq_id);
2607 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
/* Ack to firmware failed (resource shortage): have CAM requeue the NACK. */
2611 if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
2612 (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
2613 cam_freeze_devq(ccb->ccb_h.path);
2614 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2615 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2616 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2619 isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2620 ccb->ccb_h.status = CAM_REQ_CMP;
2621 ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
/* Continue a target-mode data/status phase. */
2625 case XPT_CONT_TARGET_IO:
2626 isp_target_start_ctio(isp, ccb, FROM_CAM);
2627 isp_rq_check_above(isp);
2630 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2631 tgt = ccb->ccb_h.target_id;
2634 error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2636 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2639 * If we have a FC device, reset the Command
2640 * Reference Number, because the target will expect
2641 * that we re-start the CRN at 1 after a reset.
2643 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2645 ccb->ccb_h.status = CAM_REQ_CMP;
2649 case XPT_ABORT: /* Abort the specified CCB */
2651 union ccb *accb = ccb->cab.abort_ccb;
/* Dispatch on the kind of ccb being aborted. */
2652 switch (accb->ccb_h.func_code) {
2653 #ifdef ISP_TARGET_MODE
2654 case XPT_ACCEPT_TARGET_IO:
2655 isp_abort_atio(isp, ccb);
2657 case XPT_IMMEDIATE_NOTIFY:
2658 isp_abort_inot(isp, ccb);
/* presumably the XPT_SCSI_IO arm: abort an in-flight command. */
2662 error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2664 ccb->ccb_h.status = CAM_UA_ABORT;
2666 ccb->ccb_h.status = CAM_REQ_CMP;
2670 ccb->ccb_h.status = CAM_REQ_INVALID;
2674 * This is not a queued CCB, so the caller expects it to be
2675 * complete when control is returned.
2679 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2680 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2682 if (!IS_CURRENT_SETTINGS(cts)) {
2683 ccb->ccb_h.status = CAM_REQ_INVALID;
2687 ccb->ccb_h.status = CAM_REQ_CMP;
/* Report current FC transport settings (speed, WWNs, port id). */
2690 case XPT_GET_TRAN_SETTINGS:
2692 struct ccb_trans_settings_scsi *scsi;
2693 struct ccb_trans_settings_fc *fc;
2696 scsi = &cts->proto_specific.scsi;
2697 fc = &cts->xport_specific.fc;
2698 tgt = cts->ccb_h.target_id;
2699 fcp = FCPARAM(isp, bus);
2701 cts->protocol = PROTO_SCSI;
2702 cts->protocol_version = SCSI_REV_2;
2703 cts->transport = XPORT_FC;
2704 cts->transport_version = 0;
2706 scsi->valid = CTS_SCSI_VALID_TQ;
2707 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2708 fc->valid = CTS_FC_VALID_SPEED;
2709 fc->bitrate = fcp->isp_gbspeed * 100000;
/* Per-target WWNs/port id come from the port database entry. */
2710 if (tgt < MAX_FC_TARG) {
2711 fcportdb_t *lp = &fcp->portdb[tgt];
2712 fc->wwnn = lp->node_wwn;
2713 fc->wwpn = lp->port_wwn;
2714 fc->port = lp->portid;
2715 fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2717 ccb->ccb_h.status = CAM_REQ_CMP;
2721 case XPT_CALC_GEOMETRY:
2722 cam_calc_geometry(&ccb->ccg, 1);
2726 case XPT_RESET_BUS: /* Reset the specified bus */
2727 error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2729 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2734 xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2736 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2737 ccb->ccb_h.status = CAM_REQ_CMP;
2741 case XPT_TERM_IO: /* Terminate the I/O process */
2742 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Set WWN overrides and/or request a role change on this channel. */
2746 case XPT_SET_SIM_KNOB: /* Set SIM knobs */
2748 struct ccb_sim_knob *kp = &ccb->knob;
2749 fcparam *fcp = FCPARAM(isp, bus);
2751 if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2752 fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2753 fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2754 isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2756 ccb->ccb_h.status = CAM_REQ_CMP;
2757 if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
/* Map the knob role onto the driver role, skipping no-op changes. */
2761 switch (kp->xport_specific.fc.role) {
2762 case KNOB_ROLE_NONE:
2763 if (fcp->role != ISP_ROLE_NONE) {
2765 newrole = ISP_ROLE_NONE;
2768 case KNOB_ROLE_TARGET:
2769 if (fcp->role != ISP_ROLE_TARGET) {
2771 newrole = ISP_ROLE_TARGET;
2774 case KNOB_ROLE_INITIATOR:
2775 if (fcp->role != ISP_ROLE_INITIATOR) {
2777 newrole = ISP_ROLE_INITIATOR;
2780 case KNOB_ROLE_BOTH:
2781 if (fcp->role != ISP_ROLE_BOTH) {
2783 newrole = ISP_ROLE_BOTH;
2788 ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole);
2789 if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2790 bus, newrole) != 0) {
2791 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Report the channel's WWNs and current role. */
2800 case XPT_GET_SIM_KNOB_OLD: /* Get SIM knobs -- compat value */
2801 case XPT_GET_SIM_KNOB: /* Get SIM knobs */
2803 struct ccb_sim_knob *kp = &ccb->knob;
2804 fcparam *fcp = FCPARAM(isp, bus);
2806 kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2807 kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2808 switch (fcp->role) {
2810 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2812 case ISP_ROLE_TARGET:
2813 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2815 case ISP_ROLE_INITIATOR:
2816 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2819 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2822 kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2823 ccb->ccb_h.status = CAM_REQ_CMP;
/* Describe HBA capabilities to CAM. */
2827 case XPT_PATH_INQ: /* Path routing inquiry */
2829 struct ccb_pathinq *cpi = &ccb->cpi;
2831 cpi->version_num = 1;
2832 #ifdef ISP_TARGET_MODE
2833 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2835 cpi->target_sprt = 0;
2837 cpi->hba_eng_cnt = 0;
2838 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2840 cpi->bus_id = cam_sim_bus(sim);
2841 cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2843 fcp = FCPARAM(isp, bus);
2845 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2846 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2849 * Because our loop ID can shift from time to time,
2850 * make our initiator ID out of range of our bus.
2852 cpi->initiator_id = cpi->max_target + 1;
2855 * Set base transfer capabilities for Fibre Channel, for this HBA.
2858 cpi->base_transfer_speed = 8000000;
2860 cpi->base_transfer_speed = 4000000;
2861 cpi->hba_inquiry = PI_TAG_ABLE;
2862 cpi->transport = XPORT_FC;
2863 cpi->transport_version = 0;
2864 cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2865 cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2866 cpi->xport_specific.fc.port = fcp->isp_portid;
2867 cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2868 cpi->protocol = PROTO_SCSI;
2869 cpi->protocol_version = SCSI_REV_2;
2870 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2871 strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2872 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2873 cpi->unit_number = cam_sim_unit(sim);
2874 cpi->ccb_h.status = CAM_REQ_CMP;
/* Unknown function code. */
2879 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * isp_done --
 * Completion path for an initiator SCSI ccb: normalize the CAM status
 * from the SCSI status, freeze the device queue on error, stop the
 * command watchdog, release the per-command structure and hand the ccb
 * back to CAM via xpt_done().
 *
 * NOTE(review): listing has dropped lines; some guard conditions around
 * the statements below are not visible.
 */
2886 isp_done(XS_T *sccb)
2888 ispsoftc_t *isp = XS_ISP(sccb);
2892 XS_SETERR(sccb, CAM_REQ_CMP);
/* A "completed" ccb carrying a bad SCSI status becomes a CAM error. */
2894 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2895 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
/* CHECK CONDITION without valid autosense means autosense failed. */
2896 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2897 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2899 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2903 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2904 status = sccb->ccb_h.status & CAM_STATUS_MASK;
/* Freeze the devq once per failing command so CAM serializes recovery. */
2905 if (status != CAM_REQ_CMP &&
2906 (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2907 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2908 xpt_freeze_devq(sccb->ccb_h.path, 1);
/* Cancel the watchdog (if pending) and free the per-command struct. */
2911 if (ISP_PCMD(sccb)) {
2912 if (callout_active(&PISP_PCMD(sccb)->wdog))
2913 callout_stop(&PISP_PCMD(sccb)->wdog);
2914 isp_free_pcmd(isp, (union ccb *) sccb);
2916 isp_rq_check_below(isp);
2917 xpt_done((union ccb *) sccb);
/*
 * isp_async --
 * Core-to-platform asynchronous event callback (varargs).  Dispatches
 * on 'cmd' to handle bus/loop resets, device arrival/change/departure,
 * fabric change notifications, target-mode notifies and firmware
 * crashes, translating them into CAM async events and driver state.
 *
 * NOTE(review): line-numbered listing with dropped lines; va_start/va_end,
 * several 'break's and some conditional guards are missing from view.
 */
2921 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2924 static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2927 target_id_t tgt = 0;
2930 struct ac_contract ac;
2931 struct ac_device_changed *adc;
2935 case ISPASYNC_BUS_RESET:
2938 bus = va_arg(ap, int);
2940 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected", bus);
2941 xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, NULL);
2944 case ISPASYNC_LOOP_RESET:
2949 bus = va_arg(ap, int);
/* The LIP primitive that accompanied the reset is in OUTMAILBOX1. */
2952 lipp = ISP_READ(isp, OUTMAILBOX1);
2953 fcp = FCPARAM(isp, bus);
2955 isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2957 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2958 * LIPs and loop up/down events should never reset the CRN. For
2959 * an as of yet unknown reason, 24xx series cards (and
2960 * potentially others) can interrupt with a LIP Reset status
2961 * when no LIP reset came down the wire. Additionally, the LIP
2962 * primitive accompanying this status would not be a valid LIP
2963 * Reset primitive, but some variation of an invalid AL_PA
2964 * LIP. As a result, we have to verify the AL_PD in the LIP
2965 * addresses our port before blindly resetting.
2967 if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2968 isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2969 isp_loop_changed(isp, bus);
2974 msg = "LIP Received";
2976 case ISPASYNC_LOOP_DOWN:
2980 case ISPASYNC_LOOP_UP:
2984 bus = va_arg(ap, int);
2986 isp_loop_changed(isp, bus);
2987 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2989 case ISPASYNC_DEV_ARRIVED:
2991 bus = va_arg(ap, int);
2992 lp = va_arg(ap, fcportdb_t *);
2994 fc = ISP_FC_PC(isp, bus);
2995 tgt = FC_PORTDB_TGT(isp, bus, lp);
2996 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2997 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
/* New remote target while we are an initiator: reset CRN, announce it. */
2998 if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2999 (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
3001 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
3002 isp_make_here(isp, lp, bus, tgt);
/* New remote initiator while we are a target: publish an AC_CONTRACT. */
3004 if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
3005 (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
3006 lp->is_initiator = 1;
3007 ac.contract_number = AC_CONTRACT_DEV_CHG;
3008 adc = (struct ac_device_changed *) ac.contract_data;
3009 adc->wwpn = lp->port_wwn;
3010 adc->port = lp->portid;
3013 xpt_async(AC_CONTRACT, fc->path, &ac);
3016 case ISPASYNC_DEV_CHANGED:
3017 case ISPASYNC_DEV_STAYED:
3023 bus = va_arg(ap, int);
3024 lp = va_arg(ap, fcportdb_t *);
3026 fc = ISP_FC_PC(isp, bus);
3027 tgt = FC_PORTDB_TGT(isp, bus, lp);
3028 isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
3029 if (cmd == ISPASYNC_DEV_CHANGED)
3030 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
3032 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
/* Did the remote port's target role flip?  Update announce/gone state. */
3034 if (lp->is_target !=
3035 ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
3036 (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
3037 lp->is_target = !lp->is_target;
3038 if (lp->is_target) {
3039 if (cmd == ISPASYNC_DEV_CHANGED) {
3040 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
3043 isp_make_here(isp, lp, bus, tgt);
3045 isp_make_gone(isp, lp, bus, tgt);
3046 if (cmd == ISPASYNC_DEV_CHANGED) {
3047 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
/* Did the remote port's initiator role flip?  Publish an AC_CONTRACT. */
3052 if (lp->is_initiator !=
3053 ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
3054 (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
3055 lp->is_initiator = !lp->is_initiator;
3056 ac.contract_number = AC_CONTRACT_DEV_CHG;
3057 adc = (struct ac_device_changed *) ac.contract_data;
3058 adc->wwpn = lp->port_wwn;
3059 adc->port = lp->portid;
3061 adc->arrived = lp->is_initiator;
3062 xpt_async(AC_CONTRACT, fc->path, &ac);
/* A changed device that hasn't had its CRN reset yet gets one now. */
3065 if ((cmd == ISPASYNC_DEV_CHANGED) &&
3066 (crn_reset_done == 0))
3067 isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
3071 case ISPASYNC_DEV_GONE:
3073 bus = va_arg(ap, int);
3074 lp = va_arg(ap, fcportdb_t *);
3076 fc = ISP_FC_PC(isp, bus);
3077 tgt = FC_PORTDB_TGT(isp, bus, lp);
3079 * If this has a virtual target or initiator set the isp_gdt
3080 * timer running on it to delay its departure.
3082 isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
3083 if (lp->is_target || lp->is_initiator) {
3084 lp->state = FC_PORTDB_STATE_ZOMBIE;
3085 lp->gone_timer = fc->gone_device_time;
3086 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
3087 if (fc->ready && !callout_active(&fc->gdt)) {
3088 isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
3089 callout_reset(&fc->gdt, hz, isp_gdt, fc);
3093 isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
3095 case ISPASYNC_CHANGE_NOTIFY:
3098 int evt, nphdl, nlstate, portid, reason;
3101 bus = va_arg(ap, int);
3102 evt = va_arg(ap, int);
3103 if (evt == ISPASYNC_CHANGE_PDB) {
3104 nphdl = va_arg(ap, int);
3105 nlstate = va_arg(ap, int);
3106 reason = va_arg(ap, int);
3107 } else if (evt == ISPASYNC_CHANGE_SNS) {
3108 portid = va_arg(ap, int);
3111 nlstate = reason = 0;
3115 if (evt == ISPASYNC_CHANGE_PDB) {
3117 msg = "Port Database Changed";
3118 isp_prt(isp, ISP_LOGINFO,
3119 "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3120 bus, msg, nphdl, nlstate, reason);
3122 * Port database syncs are not sufficient for
3123 * determining that logins or logouts are done on the
3124 * loop, but this information is directly available from
3125 * the reason code from the incoming mbox. We must reset
3126 * the fcp crn on these events according to FCP-4
3129 case PDB24XX_AE_IMPL_LOGO_1:
3130 case PDB24XX_AE_IMPL_LOGO_2:
3131 case PDB24XX_AE_IMPL_LOGO_3:
3132 case PDB24XX_AE_PLOGI_RCVD:
3133 case PDB24XX_AE_PRLI_RCVD:
3134 case PDB24XX_AE_PRLO_RCVD:
3135 case PDB24XX_AE_LOGO_RCVD:
3136 case PDB24XX_AE_PLOGI_DONE:
3137 case PDB24XX_AE_PRLI_DONE:
3139 * If the event is not global, twiddle tgt and
3140 * tgt_set to nominate only the target
3141 * associated with the nphdl.
3143 if (nphdl != PDB24XX_AE_GLOBAL) {
3144 /* Break if we don't yet have the pdb */
3145 if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3147 tgt = FC_PORTDB_TGT(isp, bus, lp);
3150 isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3155 } else if (evt == ISPASYNC_CHANGE_SNS) {
3156 msg = "Name Server Database Changed";
3157 isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3160 msg = "Other Change Notify";
3161 isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3163 isp_loop_changed(isp, bus);
3166 #ifdef ISP_TARGET_MODE
3167 case ISPASYNC_TARGET_NOTIFY:
3169 isp_notify_t *notify;
3171 notify = va_arg(ap, isp_notify_t *);
3173 switch (notify->nt_ncode) {
3175 case NT_ABORT_TASK_SET:
3177 case NT_CLEAR_TASK_SET:
3179 case NT_TARGET_RESET:
3180 case NT_QUERY_TASK_SET:
3181 case NT_QUERY_ASYNC_EVENT:
3183 * These are task management functions.
3185 isp_handle_platform_target_tmf(isp, notify);
3193 * No action need be taken here.
3196 case NT_GLOBAL_LOGOUT:
3199 * This is device arrival/departure notification
3201 isp_handle_platform_target_notify_ack(isp, notify, 0);
3204 isp_handle_platform_srr(isp, notify);
/* Unknown notify code: log it and ack so the firmware can move on. */
3207 isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3208 isp_handle_platform_target_notify_ack(isp, notify, 0);
3213 case ISPASYNC_TARGET_NOTIFY_ACK:
3217 inot = va_arg(ap, void *);
/* Ack could not be queued now: stash a copy and retry from a callout. */
3219 if (isp_notify_ack(isp, inot)) {
3220 isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3223 memcpy(tp->data, inot, sizeof (tp->data));
3225 callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3226 callout_reset(&tp->timer, 5,
3227 isp_refire_notify_ack, tp);
3229 isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3234 case ISPASYNC_TARGET_ACTION:
3239 hp = va_arg(ap, isphdr_t *);
/* Dispatch raw target-mode queue entries by type. */
3241 switch (hp->rqs_entry_type) {
3243 isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3246 isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3249 isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3250 __func__, hp->rqs_entry_type);
3256 case ISPASYNC_FW_CRASH:
3259 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3260 isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
/* Temporarily force polled mailbox mode around the reinit/restart. */
3262 mbox1 = isp->isp_osinfo.mbox_sleep_ok;
3263 isp->isp_osinfo.mbox_sleep_ok = 0;
3265 isp->isp_osinfo.mbox_sleep_ok = mbox1;
3266 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3271 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
/*
 * isp_default_wwn --
 * Choose a default WWNN (iswwnn != 0) or WWPN for channel 'chan',
 * preferring explicitly configured values, then NVRAM, then channel 0's
 * values, and finally a synthesized value based on the device unit.
 *
 * NOTE(review): the return statements and some guard conditions (incl.
 * how 'isactive' is used) are missing from this listing — confirm
 * against upstream before relying on the exact fallback order.
 */
3277 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3280 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3282 /* First try to use explicitly configured WWNs. */
3283 seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3287 /* Otherwise try to use WWNs from NVRAM. */
3289 seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3290 FCPARAM(isp, chan)->isp_wwpn_nvram;
3295 /* If still no WWNs, try to steal them from the first channel. */
3297 seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3298 ISP_FC_PC(isp, 0)->def_wwpn;
3300 seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3301 FCPARAM(isp, 0)->isp_wwpn_nvram;
3305 /* If still nothing -- improvise. */
/* Locally-synthesized base WWN keyed off the device unit number. */
3307 seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
3309 seed ^= 0x0100000000000000ULL;
3312 /* For additional channels we have to improvise even more. */
3313 if (!iswwnn && chan > 0) {
3315 * We'll stick our channel number plus one first into bits
3316 * 57..59 and thence into bits 52..55 which allows for 8 bits
3317 * of channel which is enough for our maximum of 255 channels.
3319 seed ^= 0x0100000000000000ULL;
3320 seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3321 seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
/*
 * isp_prt --
 * Driver printf: suppress the message unless its level is enabled in
 * isp->isp_dblev (ISP_LOGALL always prints), then emit one line
 * prefixed with the device name+unit.
 */
3327 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3333 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
/* Build "<devname>: <formatted message>" in lbuf, then print it. */
3336 snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3339 vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3341 printf("%s\n", lbuf);
/*
 * isp_xs_prt --
 * Like isp_prt() but for a specific command: same level gating, with
 * the output prefixed by the command's CAM path (xpt_print_path).
 * NOTE(review): the vprintf/va_end tail is missing from this listing.
 */
3345 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3348 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3351 xpt_print_path(xs->ccb_h.path);
/*
 * isp_nanotime_sub --
 * Return the elapsed time (b - a) in nanoseconds.
 * NOTE(review): the declaration and return lines are missing from this
 * listing; only the subtraction and conversion are visible.
 */
3359 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3364 timespecsub(b, a, &x);
3365 elapsed = GET_NANOSEC(&x);
/*
 * isp_mbox_acquire --
 * Claim exclusive use of the mailbox registers.  If they are already
 * busy the claim fails (the early-out/return lines are not visible in
 * this listing); otherwise clear the done flag and mark busy.
 */
3372 isp_mbox_acquire(ispsoftc_t *isp)
3374 if (isp->isp_osinfo.mboxbsy) {
3377 isp->isp_osinfo.mboxcmd_done = 0;
3378 isp->isp_osinfo.mboxbsy = 1;
/*
 * isp_mbox_wait_complete --
 * Wait for the current mailbox command to complete.  Sleeps on the
 * done flag when sleeping is allowed, otherwise polls in 100us steps up
 * to the timeout.  On timeout, log it and report MBOX_TIMEOUT via
 * mbp->param[0].
 */
3388 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp)
/* Sleep path: clear mbox_sleep_ok around the msleep to bar nesting. */
3389 if (isp->isp_osinfo.mbox_sleep_ok) {
3390 isp->isp_osinfo.mbox_sleep_ok = 0;
3391 isp->isp_osinfo.mbox_sleeping = 1;
3392 msleep_sbt(&isp->isp_osinfo.mboxcmd_done, &isp->isp_lock,
3393 PRIBIO, "ispmbx_sleep", to * SBT_1US, 0, 0);
3394 isp->isp_osinfo.mbox_sleep_ok = 1;
3395 isp->isp_osinfo.mbox_sleeping = 0;
/* Polled path: spin/delay in 100us quanta until done or timed out. */
3397 for (t = 0; t < to; t += 100) {
3398 if (isp->isp_osinfo.mboxcmd_done)
3401 if (isp->isp_osinfo.mboxcmd_done)
/* Either path: still not done means the command timed out. */
3406 if (isp->isp_osinfo.mboxcmd_done == 0) {
3407 isp_prt(isp, ISP_LOGWARN, "%s Mailbox Command (0x%x) Timeout (%uus) (%s:%d)",
3408 isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
3409 isp->isp_lastmbxcmd, to, mbp->func, mbp->lineno);
3410 mbp->param[0] = MBOX_TIMEOUT;
3411 isp->isp_osinfo.mboxcmd_done = 1;
/*
 * isp_mbox_notify_done --
 * Mark the pending mailbox command as done and wake any thread sleeping
 * in isp_mbox_wait_complete().
 */
3416 isp_mbox_notify_done(ispsoftc_t *isp)
3418 isp->isp_osinfo.mboxcmd_done = 1;
3419 if (isp->isp_osinfo.mbox_sleeping)
3420 wakeup(&isp->isp_osinfo.mboxcmd_done);
/*
 * isp_mbox_release --
 * Release mailbox ownership taken in isp_mbox_acquire().
 */
3424 isp_mbox_release(ispsoftc_t *isp)
3426 isp->isp_osinfo.mboxbsy = 0;
/*
 * isp_fc_scratch_acquire --
 * Claim the per-channel FC scratch area; fails if it is already busy
 * (the failure return line is not visible in this listing).
 */
3430 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3433 if (isp->isp_osinfo.pc.fc[chan].fcbsy) {
3436 isp->isp_osinfo.pc.fc[chan].fcbsy = 1;
/*
 * isp_platform_intr --
 * Main interrupt handler entry point.  NOTE(review): the body is almost
 * entirely missing from this listing; only the softc fetch is visible.
 */
3442 isp_platform_intr(void *arg)
3444 ispsoftc_t *isp = arg;
/*
 * isp_platform_intr_resp --
 * Response-queue interrupt handler: drain the response queue, then
 * explicitly clear the RISC interrupt (handshake mode is enabled).
 */
3452 isp_platform_intr_resp(void *arg)
3454 ispsoftc_t *isp = arg;
3457 isp_intr_respq(isp);
3460 /* We have handshake enabled, so explicitly complete interrupt */
3461 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
/*
 * isp_platform_intr_atio --
 * ATIO-queue interrupt handler: drain the ATIO queue (target mode
 * only), then explicitly clear the RISC interrupt (handshake mode).
 */
3465 isp_platform_intr_atio(void *arg)
3467 ispsoftc_t *isp = arg;
3470 #ifdef ISP_TARGET_MODE
3471 isp_intr_atioq(isp);
3475 /* We have handshake enabled, so explicitly complete interrupt */
3476 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3481 struct ccb_scsiio *csio;
/*
 * isp_dma2 --
 * busdma load callback: choose the pre-DMA sync direction (the XOR
 * inverts the meaning of CAM_DIR_IN for target-mode CTIOs, where data
 * flow is opposite the initiator case), sync the map and push the
 * command plus its S/G list to the firmware; unmap on failure.
 * NOTE(review): the error-check lines around ISP_SEND_CMD are missing
 * from this listing.
 */
3487 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3489 mush_t *mp = (mush_t *) arg;
3490 ispsoftc_t *isp= mp->isp;
3491 struct ccb_scsiio *csio = mp->csio;
3492 bus_dmasync_op_t op;
3498 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3499 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3500 op = BUS_DMASYNC_PREREAD;
3502 op = BUS_DMASYNC_PREWRITE;
3503 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3505 mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3507 isp_dmafree(isp, csio);
/*
 * isp_dmasetup --
 * Map a ccb's data buffer for DMA.  Commands with a transfer length go
 * through bus_dmamap_load_ccb() with isp_dma2 as the callback; zero-
 * length commands are sent straight to the firmware with no S/G list.
 * The tail lines translate mapper errors into CAM status on the ccb.
 */
3511 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3516 if (XS_XFRLEN(csio)) {
3521 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3522 (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
/* No data phase: issue the command without a segment list. */
3526 error = ISP_SEND_CMD(isp, qe, NULL, 0);
/* Error translation (some arms are missing from this listing). */
3539 csio->ccb_h.status = CAM_REQ_INVALID;
3540 error = CMD_COMPLETE;
3543 csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3544 error = CMD_COMPLETE;
/*
 * isp_dmafree --
 * Post-DMA teardown for a ccb: nothing to do when there was no data
 * phase; otherwise sync the map in the completion direction (same
 * target-mode XOR as isp_dma2) and unload it.
 */
3551 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3553 bus_dmasync_op_t op;
3555 if (XS_XFRLEN(csio) == 0)
3558 if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3559 ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3560 op = BUS_DMASYNC_POSTREAD;
3562 op = BUS_DMASYNC_POSTWRITE;
3563 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3564 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3568 * Reset the command reference number for all LUNs on a specific target
3569 * (needed when a target arrives again) or for all targets on a port
3570 * (needed for events like a LIP).
3573 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3575 struct isp_fc *fc = ISP_FC_PC(isp, chan);
3576 struct isp_nexus *nxp;
3580 isp_prt(isp, ISP_LOGDEBUG0,
3581 "Chan %d resetting CRN on all targets", chan);
3583 isp_prt(isp, ISP_LOGDEBUG0,
3584 "Chan %d resetting CRN on target %u", chan, tgt);
/* Walk every nexus bucket; reset matching nexuses (the statement that
 * clears the per-nexus CRN seed is not visible in this listing). */
3586 for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3587 for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3588 if (tgt_set == 0 || tgt == nxp->tgt)
/*
 * isp_fcp_next_crn --
 * Hand out the next FCP Command Reference Number for this command's
 * target/LUN nexus via *crnp.  Nexus records live in a per-channel hash
 * keyed by (tgt, lun), with a free list for recycling.
 * NOTE(review): lines setting tgt/lun and the return statements are
 * missing from this listing.
 */
3595 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3600 struct isp_nexus *nxp;
3603 chan = XS_CHANNEL(cmd);
3606 fc = &isp->isp_osinfo.pc.fc[chan];
3607 idx = NEXUS_HASH(tgt, lun);
/* Look for an existing nexus in the hash bucket. */
3608 nxp = fc->nexus_hash[idx];
3611 if (nxp->tgt == tgt && nxp->lun == lun)
/* Not found: take one from the free list, else allocate fresh. */
3616 nxp = fc->nexus_free_list;
3618 nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3623 fc->nexus_free_list = nxp->next;
/* Link the new nexus at the head of its hash bucket. */
3627 nxp->next = fc->nexus_hash[idx];
3628 fc->nexus_hash[idx] = nxp;
/* CRN of 0 is not valid per FCP; skip it when the seed wraps/starts. */
3630 if (nxp->crnseed == 0)
3632 *crnp = nxp->crnseed++;
3637 * We enter with the lock held
/*
 * isp_timer --
 * Periodic driver timer: restart any stalled target-mode commands and
 * re-arm itself for another isp_timer_count ticks.
 */
3640 isp_timer(void *arg)
3642 ispsoftc_t *isp = arg;
3643 #ifdef ISP_TARGET_MODE
3644 isp_tmcmd_restart(isp);
3646 callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3649 #ifdef ISP_TARGET_MODE
/*
 * isp_get_ecmd --
 * Pop an extended-command buffer off the per-softc free list (the NULL
 * check and return lines are not visible in this listing).
 */
3651 isp_get_ecmd(ispsoftc_t *isp)
3653 isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3655 isp->isp_osinfo.ecmd_free = ecmd->next;
3661 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3663 ecmd->next = isp->isp_osinfo.ecmd_free;
3664 isp->isp_osinfo.ecmd_free = ecmd;