3 * Copyright (c) 1997-2006 by Matthew Jacob
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 #include <dev/isp/isp_freebsd.h>
34 #include <sys/unistd.h>
35 #include <sys/kthread.h>
36 #include <machine/stdarg.h> /* for use by isp_prt below */
38 #include <sys/module.h>
39 #include <sys/ioccom.h>
40 #include <dev/isp/isp_ioctl.h>
41 #if __FreeBSD_version >= 500000
42 #include <sys/sysctl.h>
44 #include <cam/cam_periph.h>
47 MODULE_VERSION(isp, 1);
48 MODULE_DEPEND(isp, cam, 1, 1, 1);
/*
 * Driver-wide tunables and shared message strings.
 * NOTE(review): this extraction is missing interior lines (the embedded
 * original numbering is discontinuous), so some definitions below appear
 * truncated (e.g. the closing brace of roles[] is not visible here).
 */
49 int isp_announced = 0;
/* hysteresis interval before reacting to fabric changes -- presumably seconds; TODO confirm */
50 int isp_fabric_hysteresis = 5;
51 int isp_loop_down_limit = 300; /* default loop down limit */
52 int isp_change_is_bad = 0; /* "changed" devices are bad */
53 int isp_quickboot_time = 15; /* don't wait more than N secs for loop up */
54 int isp_gone_device_time = 30; /* grace time before reporting device lost */
/* printable names indexed by role bits: bit 0 = target, bit 1 = initiator */
55 static const char *roles[4] = {
56 "(none)", "Target", "Initiator", "Target/Initiator"
/* format string used when logging a port departing the fabric */
58 static const char prom3[] =
59 "PortID 0x%06x Departed from Target %u because of %s";
61 static void isp_freeze_loopdown(ispsoftc_t *, char *);
62 static d_ioctl_t ispioctl;
63 static void isp_intr_enable(void *);
64 static void isp_cam_async(void *, uint32_t, struct cam_path *, void *);
65 static void isp_poll(struct cam_sim *);
66 static timeout_t isp_watchdog;
67 static timeout_t isp_ldt;
68 static void isp_kthread(void *);
69 static void isp_action(struct cam_sim *, union ccb *);
71 #if __FreeBSD_version < 700000
72 ispfwfunc *isp_get_firmware_p = NULL;
75 #if __FreeBSD_version < 500000
76 #define ISP_CDEV_MAJOR 248
77 static struct cdevsw isp_cdevsw = {
79 /* close */ nullclose,
85 /* strategy */ nostrategy,
87 /* maj */ ISP_CDEV_MAJOR,
92 #define isp_sysctl_update(x) do { ; } while (0)
94 static struct cdevsw isp_cdevsw = {
95 .d_version = D_VERSION,
96 .d_flags = D_NEEDGIANT,
100 static void isp_sysctl_update(ispsoftc_t *);
103 static ispsoftc_t *isplist = NULL;
106 isp_attach(ispsoftc_t *isp)
108 int primary, secondary;
109 struct ccb_setasync csa;
110 struct cam_devq *devq;
112 struct cam_path *path;
115 * Establish (in case of 12X0) which bus is the primary.
122 * Create the device queue for our SIM(s).
124 devq = cam_simq_alloc(isp->isp_maxcmds);
130 * Construct our SIM entry.
132 ISPLOCK_2_CAMLOCK(isp);
133 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
134 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
137 CAMLOCK_2_ISPLOCK(isp);
140 CAMLOCK_2_ISPLOCK(isp);
142 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
143 isp->isp_osinfo.ehook.ich_arg = isp;
144 ISPLOCK_2_CAMLOCK(isp);
145 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
146 cam_sim_free(sim, TRUE);
147 CAMLOCK_2_ISPLOCK(isp);
148 isp_prt(isp, ISP_LOGERR,
149 "could not establish interrupt enable hook");
153 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
154 cam_sim_free(sim, TRUE);
155 CAMLOCK_2_ISPLOCK(isp);
159 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
160 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
161 xpt_bus_deregister(cam_sim_path(sim));
162 cam_sim_free(sim, TRUE);
163 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
164 CAMLOCK_2_ISPLOCK(isp);
168 xpt_setup_ccb(&csa.ccb_h, path, 5);
169 csa.ccb_h.func_code = XPT_SASYNC_CB;
170 csa.event_enable = AC_LOST_DEVICE;
171 csa.callback = isp_cam_async;
172 csa.callback_arg = sim;
173 xpt_action((union ccb *)&csa);
174 CAMLOCK_2_ISPLOCK(isp);
176 isp->isp_path = path;
178 * Create a kernel thread for fibre channel instances. We
179 * don't have dual channel FC cards.
182 ISPLOCK_2_CAMLOCK(isp);
183 #if __FreeBSD_version >= 500000
184 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
185 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
186 RFHIGHPID, 0, "%s: fc_thrd",
187 device_get_nameunit(isp->isp_dev)))
189 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
190 "%s: fc_thrd", device_get_nameunit(isp->isp_dev)))
193 xpt_bus_deregister(cam_sim_path(sim));
194 cam_sim_free(sim, TRUE);
195 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
196 CAMLOCK_2_ISPLOCK(isp);
197 isp_prt(isp, ISP_LOGERR, "could not create kthread");
200 CAMLOCK_2_ISPLOCK(isp);
202 * We start by being "loop down" if we have an initiator role
204 if (isp->isp_role & ISP_ROLE_INITIATOR) {
205 isp_freeze_loopdown(isp, "isp_attach");
206 isp->isp_osinfo.ldt =
207 timeout(isp_ldt, isp, isp_quickboot_time * hz);
208 isp->isp_osinfo.ldt_running = 1;
209 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
210 "Starting Initial Loop Down Timer");
216 * If we have a second channel, construct SIM entry for that.
218 if (IS_DUALBUS(isp)) {
219 ISPLOCK_2_CAMLOCK(isp);
220 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
221 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
223 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
224 xpt_free_path(isp->isp_path);
226 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
229 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
230 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
231 xpt_free_path(isp->isp_path);
232 cam_sim_free(sim, TRUE);
233 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
234 CAMLOCK_2_ISPLOCK(isp);
238 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
239 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
240 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
241 xpt_free_path(isp->isp_path);
242 xpt_bus_deregister(cam_sim_path(sim));
243 cam_sim_free(sim, TRUE);
244 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
245 CAMLOCK_2_ISPLOCK(isp);
249 xpt_setup_ccb(&csa.ccb_h, path, 5);
250 csa.ccb_h.func_code = XPT_SASYNC_CB;
251 csa.event_enable = AC_LOST_DEVICE;
252 csa.callback = isp_cam_async;
253 csa.callback_arg = sim;
254 xpt_action((union ccb *)&csa);
255 CAMLOCK_2_ISPLOCK(isp);
257 isp->isp_path2 = path;
261 * Create device nodes
263 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
264 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
266 if (isp->isp_role != ISP_ROLE_NONE) {
267 isp->isp_state = ISP_RUNSTATE;
268 ISP_ENABLE_INTS(isp);
270 if (isplist == NULL) {
273 ispsoftc_t *tmp = isplist;
274 while (tmp->isp_osinfo.next) {
275 tmp = tmp->isp_osinfo.next;
277 tmp->isp_osinfo.next = isp;
279 isp_sysctl_update(isp);
/*
 * Freeze the CAM SIM queue while the FC loop is down, so that commands
 * queue up instead of failing.  The actual xpt_freeze_simq() is issued
 * only on the first freeze (simqfrozen was 0); otherwise the second arm
 * merely records the SIMQFRZ_LOOPDOWN reason in the already-frozen mask.
 * 'msg' identifies the caller for the debug log.
 * NOTE(review): interior lines (braces/else) are missing from this
 * extraction; annotated per the visible statements only.
 */
283 isp_freeze_loopdown(ispsoftc_t *isp, char *msg)
285 if (isp->isp_osinfo.simqfrozen == 0) {
286 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
287 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
/* drop the ISP lock and take the CAM lock around the CAM call */
288 ISPLOCK_2_CAMLOCK(isp);
289 xpt_freeze_simq(isp->isp_sim, 1);
290 CAMLOCK_2_ISPLOCK(isp);
/* already frozen: just mark the loopdown reason */
292 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
293 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
298 #if __FreeBSD_version < 500000
300 #define _IOP struct proc
302 #define _IOP struct thread
303 #define _DEV struct cdev *
307 ispioctl(_DEV dev, u_long c, caddr_t addr, int flags, _IOP *td)
310 int nr, retval = ENOTTY;
314 if (minor(dev) == device_get_unit(isp->isp_dev)) {
317 isp = isp->isp_osinfo.next;
323 #ifdef ISP_FW_CRASH_DUMP
324 case ISP_GET_FW_CRASH_DUMP:
326 uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
331 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
333 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
337 void *uaddr = *((void **) addr);
338 if (copyout(ptr, uaddr, sz)) {
349 case ISP_FORCE_CRASH_DUMP:
352 isp_freeze_loopdown(isp,
353 "ispioctl(ISP_FORCE_CRASH_DUMP)");
363 int olddblev = isp->isp_dblev;
364 isp->isp_dblev = *(int *)addr;
365 *(int *)addr = olddblev;
370 *(int *)addr = isp->isp_role;
375 if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
379 *(int *)addr = isp->isp_role;
391 if (isp_fc_runstate(isp, 5 * 1000000)) {
402 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
410 case ISP_FC_GETDINFO:
412 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
418 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
423 lp = &FCPARAM(isp)->portdb[ifc->loopid];
424 if (lp->state == FC_PORTDB_STATE_VALID) {
425 ifc->role = lp->roles;
426 ifc->loopid = lp->handle;
427 ifc->portid = lp->portid;
428 ifc->node_wwn = lp->node_wwn;
429 ifc->port_wwn = lp->port_wwn;
439 isp_stats_t *sp = (isp_stats_t *) addr;
441 MEMZERO(sp, sizeof (*sp));
442 sp->isp_stat_version = ISP_STATS_VERSION;
443 sp->isp_type = isp->isp_type;
444 sp->isp_revision = isp->isp_revision;
446 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
447 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
448 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
449 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
450 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
451 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
452 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
453 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
461 isp->isp_intbogus = 0;
462 isp->isp_intmboxc = 0;
463 isp->isp_intoasync = 0;
464 isp->isp_rsltccmplt = 0;
465 isp->isp_fphccmplt = 0;
466 isp->isp_rscchiwater = 0;
467 isp->isp_fpcchiwater = 0;
471 case ISP_FC_GETHINFO:
473 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
474 MEMZERO(hba, sizeof (*hba));
476 hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
477 hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
478 hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
480 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
481 hba->fc_scsi_supported = 1;
482 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
483 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
484 hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
485 hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
486 hba->active_node_wwn = ISP_NODEWWN(isp);
487 hba->active_port_wwn = ISP_PORTWWN(isp);
492 case ISP_GET_FC_PARAM:
494 struct isp_fc_param *f = (struct isp_fc_param *) addr;
500 if (strcmp(f->param_name, "framelength") == 0) {
501 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
505 if (strcmp(f->param_name, "exec_throttle") == 0) {
506 f->parameter = FCPARAM(isp)->isp_execthrottle;
510 if (strcmp(f->param_name, "fullduplex") == 0) {
511 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
516 if (strcmp(f->param_name, "loopid") == 0) {
517 f->parameter = FCPARAM(isp)->isp_loopid;
/*
 * ISP_SET_FC_PARAM ioctl: set a named FC NVRAM-style parameter.
 * Recognized names: "framelength", "exec_throttle", "fullduplex",
 * "loopid"; each arm validates the value before storing it in FCPARAM.
 */
524 case ISP_SET_FC_PARAM:
526 struct isp_fc_param *f = (struct isp_fc_param *) addr;
527 uint32_t param = f->parameter;
533 if (strcmp(f->param_name, "framelength") == 0) {
/*
 * BUG(review): "param != 1024" is tested twice -- the second
 * comparison was presumably intended to be 2048, since valid FC
 * maximum frame lengths are 512/1024/2048.  As written, 2048 is
 * rejected.  TODO: confirm against the stock isp_freebsd.c.
 */
534 if (param != 512 && param != 1024 && param != 1024) {
538 FCPARAM(isp)->isp_maxfrmlen = param;
542 if (strcmp(f->param_name, "exec_throttle") == 0) {
/* execution throttle must fit in [16, 255] */
543 if (param < 16 || param > 255) {
547 FCPARAM(isp)->isp_execthrottle = param;
551 if (strcmp(f->param_name, "fullduplex") == 0) {
/* boolean: set or clear ICBOPT_FULL_DUPLEX in the firmware options */
552 if (param != 0 && param != 1) {
557 FCPARAM(isp)->isp_fwoptions |=
560 FCPARAM(isp)->isp_fwoptions &=
566 if (strcmp(f->param_name, "loopid") == 0) {
/* hard loop IDs are limited to 0..125 on an arbitrated loop */
567 if (param < 0 || param > 125) {
571 FCPARAM(isp)->isp_loopid = param;
581 struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
589 memset(&mbs, 0, sizeof (mbs));
590 needmarker = retval = 0;
591 loopid = fct->loopid;
592 if (FCPARAM(isp)->isp_2klogin == 0) {
595 switch (fct->action) {
597 mbs.param[0] = MBOX_CLEAR_ACA;
598 mbs.param[1] = loopid;
599 mbs.param[2] = fct->lun;
601 case IPT_TARGET_RESET:
602 mbs.param[0] = MBOX_TARGET_RESET;
603 mbs.param[1] = loopid;
607 mbs.param[0] = MBOX_LUN_RESET;
608 mbs.param[1] = loopid;
609 mbs.param[2] = fct->lun;
612 case IPT_CLEAR_TASK_SET:
613 mbs.param[0] = MBOX_CLEAR_TASK_SET;
614 mbs.param[1] = loopid;
615 mbs.param[2] = fct->lun;
618 case IPT_ABORT_TASK_SET:
619 mbs.param[0] = MBOX_ABORT_TASK_SET;
620 mbs.param[1] = loopid;
621 mbs.param[2] = fct->lun;
631 isp->isp_sendmarker |= 1;
633 retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
#if __FreeBSD_version >= 500000
/*
 * Register per-device sysctl nodes (FreeBSD 5+ only).  The 64-bit node
 * and port WWNs are formatted as hex strings (printed as two 32-bit
 * halves because there is no portable 64-bit format here), plus two
 * writable knobs for loop-down and gone-device timing.
 */
648 isp_sysctl_update(ispsoftc_t *isp)
650 struct sysctl_ctx_list *ctx =
651 device_get_sysctl_ctx(isp->isp_osinfo.dev);
652 struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
/* render the active node WWN as "0x<hi32><lo32>" */
658 snprintf(isp->isp_osinfo.sysctl_info.fc.wwnn,
659 sizeof (isp->isp_osinfo.sysctl_info.fc.wwnn), "0x%08x%08x",
660 (uint32_t) (ISP_NODEWWN(isp) >> 32), (uint32_t) ISP_NODEWWN(isp));
/* render the active port WWN the same way */
662 snprintf(isp->isp_osinfo.sysctl_info.fc.wwpn,
663 sizeof (isp->isp_osinfo.sysctl_info.fc.wwpn), "0x%08x%08x",
664 (uint32_t) (ISP_PORTWWN(isp) >> 32), (uint32_t) ISP_PORTWWN(isp));
666 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
667 "wwnn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwnn, 0,
668 "World Wide Node Name");
670 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
671 "wwpn", CTLFLAG_RD, isp->isp_osinfo.sysctl_info.fc.wwpn, 0,
672 "World Wide Port Name");
674 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
676 CTLFLAG_RW, &isp->isp_osinfo.loop_down_limit, 0,
677 "How long to wait for loop to come back up");
679 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
681 CTLFLAG_RW, &isp->isp_osinfo.gone_device_time, 0,
682 "How long to wait for a device to reappear");
/*
 * config_intrhook callback, run once interrupts may be delivered.
 * Enables chip interrupts (unless the adapter has no role configured)
 * and then removes the hook so autoconfiguration can proceed.
 */
687 isp_intr_enable(void *arg)
689 ispsoftc_t *isp = arg;
690 if (isp->isp_role != ISP_ROLE_NONE) {
691 ISP_ENABLE_INTS(isp);
693 /* Release our hook so that the boot can continue. */
694 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
698 * Put the target mode functions here, because some are inlines
701 #ifdef ISP_TARGET_MODE
703 static __inline int is_lun_enabled(ispsoftc_t *, int, lun_id_t);
704 static __inline int are_any_luns_enabled(ispsoftc_t *, int);
705 static __inline tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
706 static __inline void rls_lun_statep(ispsoftc_t *, tstate_t *);
707 static __inline atio_private_data_t *isp_get_atpd(ispsoftc_t *, int);
709 create_lun_state(ispsoftc_t *, int, struct cam_path *, tstate_t **);
710 static void destroy_lun_state(ispsoftc_t *, tstate_t *);
711 static int isp_en_lun(ispsoftc_t *, union ccb *);
712 static void isp_ledone(ispsoftc_t *, lun_entry_t *);
713 static cam_status isp_abort_tgt_ccb(ispsoftc_t *, union ccb *);
714 static timeout_t isp_refire_putback_atio;
715 static void isp_complete_ctio(union ccb *);
716 static void isp_target_putback_atio(union ccb *);
717 static void isp_target_start_ctio(ispsoftc_t *, union ccb *);
718 static int isp_handle_platform_atio(ispsoftc_t *, at_entry_t *);
719 static int isp_handle_platform_atio2(ispsoftc_t *, at2_entry_t *);
720 static int isp_handle_platform_ctio(ispsoftc_t *, void *);
721 static int isp_handle_platform_notify_scsi(ispsoftc_t *, in_entry_t *);
722 static int isp_handle_platform_notify_fc(ispsoftc_t *, in_fcentry_t *);
725 is_lun_enabled(ispsoftc_t *isp, int bus, lun_id_t lun)
728 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
733 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
736 } while ((tptr = tptr->next) != NULL);
/*
 * Scan the lun hash for any enabled lun state on the given port.
 * On dual-bus adapters each bus owns half of the hash table, so the
 * scan window [lo, hi) is offset by the port number.
 * NOTE(review): the return statements are not visible in this
 * extraction; presumably returns nonzero when an entry is found.
 */
741 are_any_luns_enabled(ispsoftc_t *isp, int port)
744 if (IS_DUALBUS(isp)) {
745 lo = (port * (LUN_HASH_SIZE >> 1));
746 hi = lo + (LUN_HASH_SIZE >> 1);
751 for (lo = 0; lo < hi; lo++) {
752 if (isp->isp_osinfo.lun_hash[lo]) {
759 static __inline tstate_t *
760 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
762 tstate_t *tptr = NULL;
764 if (lun == CAM_LUN_WILDCARD) {
765 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
766 tptr = &isp->isp_osinfo.tsdflt[bus];
772 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
779 if (tptr->lun == lun && tptr->bus == bus) {
783 } while ((tptr = tptr->next) != NULL);
788 rls_lun_statep(ispsoftc_t *isp, tstate_t *tptr)
/*
 * Linear search of the fixed-size ATIO private-data pool.
 * NOTE(review): the match condition and return are missing from this
 * extraction -- presumably it matches 'tag' and returns the slot
 * (or NULL); confirm against the full source.
 */
794 static __inline atio_private_data_t *
795 isp_get_atpd(ispsoftc_t *isp, int tag)
797 atio_private_data_t *atp;
798 for (atp = isp->isp_osinfo.atpdp;
799 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
807 create_lun_state(ispsoftc_t *isp, int bus,
808 struct cam_path *path, tstate_t **rslt)
813 tstate_t *tptr, *new;
815 lun = xpt_path_lun_id(path);
817 return (CAM_LUN_INVALID);
819 if (is_lun_enabled(isp, bus, lun)) {
820 return (CAM_LUN_ALRDY_ENA);
822 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
824 return (CAM_RESRC_UNAVAIL);
827 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
828 xpt_path_target_id(path), xpt_path_lun_id(path));
829 if (status != CAM_REQ_CMP) {
835 SLIST_INIT(&new->atios);
836 SLIST_INIT(&new->inots);
839 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
840 tptr = isp->isp_osinfo.lun_hash[hfx];
842 isp->isp_osinfo.lun_hash[hfx] = new;
849 return (CAM_REQ_CMP);
853 destroy_lun_state(ispsoftc_t *isp, tstate_t *tptr)
861 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
862 pw = isp->isp_osinfo.lun_hash[hfx];
865 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
866 isp->isp_osinfo.lun_hash[hfx] = pw->next;
871 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
882 free(tptr, M_DEVBUF);
889 isp_en_lun(ispsoftc_t *isp, union ccb *ccb)
891 struct ccb_en_lun *cel = &ccb->cel;
894 int bus, cmd, av, wildcard, tm_on;
898 bus = XS_CHANNEL(ccb);
900 xpt_print(ccb->ccb_h.path, "illegal bus %d\n", bus);
901 ccb->ccb_h.status = CAM_PATH_INVALID;
904 tgt = ccb->ccb_h.target_id;
905 lun = ccb->ccb_h.target_lun;
907 if (isp->isp_dblev & ISP_LOGTDEBUG0) {
908 xpt_print(ccb->ccb_h.path, "%sabling lun 0x%x on channel %d\n",
909 cel->enable? "en" : "dis", lun, bus);
912 if ((lun != CAM_LUN_WILDCARD) &&
913 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
914 ccb->ccb_h.status = CAM_LUN_INVALID;
919 sdparam *sdp = isp->isp_param;
921 if (tgt != CAM_TARGET_WILDCARD &&
922 tgt != sdp->isp_initiator_id) {
923 ccb->ccb_h.status = CAM_TID_INVALID;
928 * There's really no point in doing this yet w/o multi-tid
929 * capability. Even then, it's problematic.
932 if (tgt != CAM_TARGET_WILDCARD &&
933 tgt != FCPARAM(isp)->isp_iid) {
934 ccb->ccb_h.status = CAM_TID_INVALID;
939 * This is as a good a place as any to check f/w capabilities.
941 if (FCPARAM(isp)->isp_tmode == 0) {
942 xpt_print(ccb->ccb_h.path,
943 "firmware does not support target mode\n");
944 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
948 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
949 * XXX: dork with our already fragile enable/disable code.
951 if (FCPARAM(isp)->isp_sccfw == 0) {
952 xpt_print(ccb->ccb_h.path,
953 "firmware not SCCLUN capable\n");
954 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
959 if (tgt == CAM_TARGET_WILDCARD) {
960 if (lun == CAM_LUN_WILDCARD) {
963 ccb->ccb_h.status = CAM_LUN_INVALID;
970 tm_on = (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) != 0;
973 * Next check to see whether this is a target/lun wildcard action.
975 * If so, we know that we can accept commands for luns that haven't
976 * been enabled yet and send them upstream. Otherwise, we have to
977 * handle them locally (if we see them at all).
981 tptr = &isp->isp_osinfo.tsdflt[bus];
984 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
988 xpt_create_path(&tptr->owner, NULL,
989 xpt_path_path_id(ccb->ccb_h.path),
990 xpt_path_target_id(ccb->ccb_h.path),
991 xpt_path_lun_id(ccb->ccb_h.path));
992 if (ccb->ccb_h.status != CAM_REQ_CMP) {
995 SLIST_INIT(&tptr->atios);
996 SLIST_INIT(&tptr->inots);
997 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
1000 ccb->ccb_h.status = CAM_REQ_CMP;
1004 ccb->ccb_h.status = CAM_SCSI_BUSY;
1007 xpt_free_path(tptr->owner);
1008 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
1013 * Now check to see whether this bus needs to be
1014 * enabled/disabled with respect to target mode.
1017 if (cel->enable && tm_on == 0) {
1018 av |= ENABLE_TARGET_FLAG;
1019 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1021 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1023 isp->isp_osinfo.tmflags[bus] &=
1024 ~TM_WILDCARD_ENABLED;
1025 xpt_free_path(tptr->owner);
1029 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
1030 xpt_print(ccb->ccb_h.path, "Target Mode Enabled\n");
1031 } else if (cel->enable == 0 && tm_on && wildcard) {
1032 if (are_any_luns_enabled(isp, bus)) {
1033 ccb->ccb_h.status = CAM_SCSI_BUSY;
1036 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1038 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1041 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1042 xpt_print(ccb->ccb_h.path, "Target Mode Disabled\n");
1046 ccb->ccb_h.status = CAM_REQ_CMP;
1051 * Find an empty slot
1053 for (seq = 0; seq < NLEACT; seq++) {
1054 if (isp->isp_osinfo.leact[seq] == 0) {
1058 if (seq >= NLEACT) {
1059 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1063 isp->isp_osinfo.leact[seq] = ccb;
1067 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
1068 if (ccb->ccb_h.status != CAM_REQ_CMP) {
1069 isp->isp_osinfo.leact[seq] = 0;
1073 tptr = get_lun_statep(isp, bus, lun);
1075 ccb->ccb_h.status = CAM_LUN_INVALID;
1081 int c, n, ulun = lun;
1083 cmd = RQSTYPE_ENABLE_LUN;
1086 if (IS_FC(isp) && lun != 0) {
1087 cmd = RQSTYPE_MODIFY_LUN;
1090 * For SCC firmware, we only deal with setting
1091 * (enabling or modifying) lun 0.
1095 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
1096 rls_lun_statep(isp, tptr);
1097 ccb->ccb_h.status = CAM_REQ_INPROG;
1101 int c, n, ulun = lun;
1103 cmd = -RQSTYPE_MODIFY_LUN;
1106 if (IS_FC(isp) && lun != 0) {
1109 * For SCC firmware, we only deal with setting
1110 * (enabling or modifying) lun 0.
1114 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq+1) == 0) {
1115 rls_lun_statep(isp, tptr);
1116 ccb->ccb_h.status = CAM_REQ_INPROG;
1120 rls_lun_statep(isp, tptr);
1121 xpt_print(ccb->ccb_h.path, "isp_lun_cmd failed\n");
1122 isp->isp_osinfo.leact[seq] = 0;
1123 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1128 isp_ledone(ispsoftc_t *isp, lun_entry_t *lep)
1130 const char lfmt[] = "now %sabled for target mode";
1135 struct ccb_en_lun *cel;
1137 seq = lep->le_reserved - 1;
1138 if (seq >= NLEACT) {
1139 isp_prt(isp, ISP_LOGERR,
1140 "seq out of range (%u) in isp_ledone", seq);
1143 ccb = isp->isp_osinfo.leact[seq];
1145 isp_prt(isp, ISP_LOGERR,
1146 "no ccb for seq %u in isp_ledone", seq);
1150 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), XS_LUN(ccb));
1152 xpt_print(ccb->ccb_h.path, "null tptr in isp_ledone\n");
1153 isp->isp_osinfo.leact[seq] = 0;
1157 if (lep->le_status != LUN_OK) {
1158 xpt_print(ccb->ccb_h.path,
1159 "ENABLE/MODIFY LUN returned 0x%x\n", lep->le_status);
1161 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1162 rls_lun_statep(isp, tptr);
1163 isp->isp_osinfo.leact[seq] = 0;
1164 ISPLOCK_2_CAMLOCK(isp);
1166 CAMLOCK_2_ISPLOCK(isp);
1169 isp_prt(isp, ISP_LOGTDEBUG0,
1170 "isp_ledone: ENABLE/MODIFY done okay");
1175 ccb->ccb_h.status = CAM_REQ_CMP;
1176 xpt_print(ccb->ccb_h.path, lfmt, "en");
1177 rls_lun_statep(isp, tptr);
1178 isp->isp_osinfo.leact[seq] = 0;
1179 ISPLOCK_2_CAMLOCK(isp);
1181 CAMLOCK_2_ISPLOCK(isp);
1185 if (lep->le_header.rqs_entry_type == RQSTYPE_MODIFY_LUN) {
1186 if (isp_lun_cmd(isp, -RQSTYPE_ENABLE_LUN, XS_CHANNEL(ccb),
1187 XS_TGT(ccb), XS_LUN(ccb), 0, 0, seq+1)) {
1188 xpt_print(ccb->ccb_h.path,
1189 "isp_ledone: isp_lun_cmd failed\n");
1192 rls_lun_statep(isp, tptr);
1196 xpt_print(ccb->ccb_h.path, lfmt, "dis");
1197 rls_lun_statep(isp, tptr);
1198 destroy_lun_state(isp, tptr);
1199 ccb->ccb_h.status = CAM_REQ_CMP;
1200 isp->isp_osinfo.leact[seq] = 0;
1201 ISPLOCK_2_CAMLOCK(isp);
1203 CAMLOCK_2_ISPLOCK(isp);
1204 if (are_any_luns_enabled(isp, XS_CHANNEL(ccb)) == 0) {
1205 int bus = XS_CHANNEL(ccb);
1207 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1209 isp_prt(isp, ISP_LOGWARN,
1210 "disable target mode on channel %d failed", bus);
1212 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1218 isp_abort_tgt_ccb(ispsoftc_t *isp, union ccb *ccb)
1221 struct ccb_hdr_slist *lp;
1222 struct ccb_hdr *curelm;
1224 union ccb *accb = ccb->cab.abort_ccb;
1226 xpt_print(ccb->ccb_h.path, "aborting ccb %p\n", accb);
1227 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1229 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1230 ((fcparam *) isp->isp_param)->isp_loopid)) {
1232 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1233 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1238 * Being restrictive about target ids is really about
1239 * making sure we're aborting for the right multi-tid
1240 * path. This doesn't really make much sense at present.
1243 return (CAM_PATH_INVALID);
1247 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1249 xpt_print(ccb->ccb_h.path, "can't get statep\n");
1250 return (CAM_PATH_INVALID);
1252 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1254 ctr = &tptr->atio_count;
1255 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1257 ctr = &tptr->inot_count;
1259 rls_lun_statep(isp, tptr);
1260 xpt_print(ccb->ccb_h.path, "bad function code %d\n",
1261 accb->ccb_h.func_code);
1262 return (CAM_UA_ABORT);
1264 curelm = SLIST_FIRST(lp);
1266 if (curelm == &accb->ccb_h) {
1268 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1270 while(curelm != NULL) {
1271 struct ccb_hdr *nextelm;
1273 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1274 if (nextelm == &accb->ccb_h) {
1276 SLIST_NEXT(curelm, sim_links.sle) =
1277 SLIST_NEXT(nextelm, sim_links.sle);
1283 rls_lun_statep(isp, tptr);
1286 accb->ccb_h.status = CAM_REQ_ABORTED;
1288 return (CAM_REQ_CMP);
1290 xpt_print(ccb->ccb_h.path, "ccb %p not found\n", accb);
1291 return (CAM_PATH_INVALID);
1295 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb)
1298 struct ccb_scsiio *cso = &ccb->csio;
1299 uint32_t nxti, optr, handle;
1300 uint8_t local[QENTRY_LEN];
1303 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1304 xpt_print(ccb->ccb_h.path,
1305 "Request Queue Overflow in isp_target_start_ctio\n");
1306 XS_SETERR(ccb, CAM_REQUEUE_REQ);
1309 memset(local, 0, QENTRY_LEN);
1312 * We're either moving data or completing a command here.
1316 atio_private_data_t *atp;
1317 ct2_entry_t *cto = (ct2_entry_t *) local;
1319 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1320 cto->ct_header.rqs_entry_count = 1;
1321 if (FCPARAM(isp)->isp_2klogin) {
1322 ((ct2e_entry_t *)cto)->ct_iid = cso->init_id;
1324 cto->ct_iid = cso->init_id;
1325 if (FCPARAM(isp)->isp_sccfw == 0) {
1326 cto->ct_lun = ccb->ccb_h.target_lun;
1330 atp = isp_get_atpd(isp, cso->tag_id);
1332 xpt_print(ccb->ccb_h.path,
1333 "cannot find private data adjunct for tag %x\n",
1335 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
1339 cto->ct_rxid = cso->tag_id;
1340 if (cso->dxfer_len == 0) {
1341 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1342 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1343 cto->ct_flags |= CT2_SENDSTATUS;
1344 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1346 atp->orig_datalen - atp->bytes_xfered;
1347 if (cto->ct_resid < 0) {
1348 cto->rsp.m1.ct_scsi_status |=
1350 } else if (cto->ct_resid > 0) {
1351 cto->rsp.m1.ct_scsi_status |=
1355 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1356 int m = min(cso->sense_len, MAXRESPLEN);
1357 memcpy(cto->rsp.m1.ct_resp,
1358 &cso->sense_data, m);
1359 cto->rsp.m1.ct_senselen = m;
1360 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1363 cto->ct_flags |= CT2_FLAG_MODE0;
1364 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1365 cto->ct_flags |= CT2_DATA_IN;
1367 cto->ct_flags |= CT2_DATA_OUT;
1369 cto->ct_reloff = atp->bytes_xfered;
1370 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1371 cto->ct_flags |= CT2_SENDSTATUS;
1372 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1375 (atp->bytes_xfered + cso->dxfer_len);
1376 if (cto->ct_resid < 0) {
1377 cto->rsp.m0.ct_scsi_status |=
1379 } else if (cto->ct_resid > 0) {
1380 cto->rsp.m0.ct_scsi_status |=
1384 atp->last_xframt = cso->dxfer_len;
1387 * If we're sending data and status back together,
1388 * we can't also send back sense data as well.
1390 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1393 if (cto->ct_flags & CT2_SENDSTATUS) {
1394 isp_prt(isp, ISP_LOGTDEBUG0,
1395 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1396 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1397 cso->dxfer_len, cto->ct_resid);
1398 cto->ct_flags |= CT2_CCINCR;
1399 atp->state = ATPD_STATE_LAST_CTIO;
1401 atp->state = ATPD_STATE_CTIO;
1403 cto->ct_timeout = 10;
1405 ct_entry_t *cto = (ct_entry_t *) local;
1407 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1408 cto->ct_header.rqs_entry_count = 1;
1409 cto->ct_iid = cso->init_id;
1410 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1411 cto->ct_tgt = ccb->ccb_h.target_id;
1412 cto->ct_lun = ccb->ccb_h.target_lun;
1413 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1414 if (AT_HAS_TAG(cso->tag_id)) {
1415 cto->ct_tag_val = (uint8_t) AT_GET_TAG(cso->tag_id);
1416 cto->ct_flags |= CT_TQAE;
1418 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1419 cto->ct_flags |= CT_NODISC;
1421 if (cso->dxfer_len == 0) {
1422 cto->ct_flags |= CT_NO_DATA;
1423 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1424 cto->ct_flags |= CT_DATA_IN;
1426 cto->ct_flags |= CT_DATA_OUT;
1428 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1429 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1430 cto->ct_scsi_status = cso->scsi_status;
1431 cto->ct_resid = cso->resid;
1432 isp_prt(isp, ISP_LOGTDEBUG0,
1433 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1434 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1437 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1438 cto->ct_timeout = 10;
1441 if (isp_save_xs_tgt(isp, ccb, &handle)) {
1442 xpt_print(ccb->ccb_h.path,
1443 "No XFLIST pointers for isp_target_start_ctio\n");
1444 XS_SETERR(ccb, CAM_REQUEUE_REQ);
1450 * Call the dma setup routines for this entry (and any subsequent
1451 * CTIOs) if there's data to move, and then tell the f/w it's got
1452 * new things to play with. As with isp_start's usage of DMA setup,
1453 * any swizzling is done in the machine dependent layer. Because
1454 * of this, we put the request onto the queue area first in native
1459 ct2_entry_t *cto = (ct2_entry_t *) local;
1460 cto->ct_syshandle = handle;
1462 ct_entry_t *cto = (ct_entry_t *) local;
1463 cto->ct_syshandle = handle;
1466 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1468 ISP_ADD_REQUEST(isp, nxti);
1469 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1473 XS_SETERR(ccb, CAM_REQUEUE_REQ);
1479 isp_destroy_tgt_handle(isp, handle);
1482 ISPLOCK_2_CAMLOCK(isp);
1484 CAMLOCK_2_ISPLOCK(isp);
/*
 * timeout(9) handler: retry pushing an ATIO back to the firmware after
 * an earlier request-queue-full condition (see isp_target_putback_atio,
 * which rearms this timeout on overflow).
 */
1488 isp_refire_putback_atio(void *arg)
1491 isp_target_putback_atio(arg);
/*
 * Replenish the firmware with an ATIO (or ATIO2 for SCC fibre channel
 * firmware) entry rebuilt from a completed target-mode CCB, then complete
 * the CCB back to CAM via isp_complete_ctio().
 * NOTE(review): this listing is fragmented; intermediate lines (locals such
 * as 'isp'/'qe', closing braces, returns) are missing from this view.
 */
1496 isp_target_putback_atio(union ccb *ccb)
1499 struct ccb_scsiio *cso;
1500 uint32_t nxti, optr;
/* No room on the request queue: defer and retry via timeout callback. */
1505 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1506 xpt_print(ccb->ccb_h.path,
1507 "isp_target_putback_atio: Request Queue Overflow\n");
1508 (void) timeout(isp_refire_putback_atio, ccb, 10);
1511 memset(qe, 0, QENTRY_LEN);
/* Fibre channel path: rebuild an ATIO2 entry. */
1514 at2_entry_t local, *at = &local;
1515 MEMZERO(at, sizeof (at2_entry_t));
1516 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1517 at->at_header.rqs_entry_count = 1;
/* SCC firmware carries a 16-bit lun; otherwise lun is 8 bits wide. */
1518 if (FCPARAM(isp)->isp_sccfw) {
1519 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1521 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1523 at->at_status = CT_OK;
1524 at->at_rxid = cso->tag_id;
1525 at->at_iid = cso->ccb_h.target_id;
1526 isp_put_atio2(isp, at, qe);
/* Parallel SCSI path: rebuild a plain ATIO entry. */
1528 at_entry_t local, *at = &local;
1529 MEMZERO(at, sizeof (at_entry_t));
1530 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1531 at->at_header.rqs_entry_count = 1;
1532 at->at_iid = cso->init_id;
/* Channel number is folded into bit 7 of the initiator id. */
1533 at->at_iid |= XS_CHANNEL(ccb) << 7;
1534 at->at_tgt = cso->ccb_h.target_id;
1535 at->at_lun = cso->ccb_h.target_lun;
1536 at->at_status = CT_OK;
/* Tag value and firmware handle were packed into tag_id by AT_MAKE_TAGID. */
1537 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1538 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1539 isp_put_atio(isp, at, qe);
1541 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1542 ISP_ADD_REQUEST(isp, nxti);
1543 isp_complete_ctio(ccb);
/*
 * Finish a target-mode CCB: mark it complete if still in progress, clear
 * the SIM-queued flag and (in lines not visible here) hand it to xpt_done()
 * under the CAM lock. NOTE(review): fragmented listing.
 */
1547 isp_complete_ctio(union ccb *ccb)
1549 ISPLOCK_2_CAMLOCK(isp);
/* Only promote to CAM_REQ_CMP if nothing already set an error status. */
1550 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1551 ccb->ccb_h.status |= CAM_REQ_CMP;
1553 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1555 CAMLOCK_2_ISPLOCK(isp);
1559 * Handle ATIO stuff that the generic code can't.
1560 * This means handling CDBs.
/*
 * Parallel-SCSI ATIO handler: validate the firmware status, locate the lun
 * state, dequeue a pre-posted accept-TIO CCB, fill it from the ATIO entry
 * and complete it to CAM. NOTE(review): fragmented listing — returns, braces
 * and some locals are not visible in this view.
 */
1564 isp_handle_platform_atio(ispsoftc_t *isp, at_entry_t *aep)
1567 int status, bus, iswildcard;
1568 struct ccb_accept_tio *atiop;
1571 * The firmware status (except for the QLTM_SVALID bit)
1572 * indicates why this ATIO was sent to us.
1574 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1576 * If the DISCONNECTS DISABLED bit is set in the flags field,
1577 * we're still connected on the SCSI bus.
1579 status = aep->at_status;
1580 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1582 * Bus Phase Sequence error. We should have sense data
1583 * suggested by the f/w. I'm not sure quite yet what
1584 * to do about this for CAM.
1586 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1587 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Anything other than a real CDB delivery should not reach us; bounce it. */
1590 if ((status & ~QLTM_SVALID) != AT_CDB) {
1591 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1593 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Bus number is encoded in the initiator id field. */
1597 bus = GET_BUS_VAL(aep->at_iid);
1598 tptr = get_lun_statep(isp, bus, aep->at_lun);
/* No per-lun state: fall back to the wildcard lun, if enabled. */
1600 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1603 * Because we can't autofeed sense data back with
1604 * a command for parallel SCSI, we can't give back
1605 * a CHECK CONDITION. We'll give back a BUSY status
1606 * instead. This works out okay because the only
1607 * time we should, in fact, get this, is in the
1608 * case that somebody configured us without the
1609 * blackhole driver, so they get what they deserve.
1611 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Take a pre-posted accept-TIO off the lun's free list, if any. */
1619 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1620 if (atiop == NULL) {
1622 * Because we can't autofeed sense data back with
1623 * a command for parallel SCSI, we can't give back
1624 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1625 * instead. This works out okay because the only time we
1626 * should, in fact, get this, is in the case that we've
1629 xpt_print(tptr->owner,
1630 "no ATIOS for lun %d from initiator %d on channel %d\n",
1631 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1632 if (aep->at_flags & AT_TQAE)
1633 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1635 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1636 rls_lun_statep(isp, tptr);
1639 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1641 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1642 aep->at_lun, tptr->atio_count);
1644 atiop->ccb_h.target_id = aep->at_tgt;
1645 atiop->ccb_h.target_lun = aep->at_lun;
/* Propagate the firmware's disconnect-disabled hint to CAM. */
1647 if (aep->at_flags & AT_NODISC) {
1648 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1650 atiop->ccb_h.flags = 0;
/* Copy firmware-suggested sense data when present (QLTM_SVALID). */
1653 if (status & QLTM_SVALID) {
1654 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1655 atiop->sense_len = amt;
1656 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1658 atiop->sense_len = 0;
1661 atiop->init_id = GET_IID_VAL(aep->at_iid);
1662 atiop->cdb_len = aep->at_cdblen;
1663 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1664 atiop->ccb_h.status = CAM_CDB_RECVD;
1666 * Construct a tag 'id' based upon tag value (which may be 0..255)
1667 * and the handle (which we have to preserve).
1669 AT_MAKE_TAGID(atiop->tag_id, device_get_unit(isp->isp_dev), aep);
1670 if (aep->at_flags & AT_TQAE) {
1671 atiop->tag_action = aep->at_tag_type;
1672 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1674 xpt_done((union ccb*)atiop);
1675 isp_prt(isp, ISP_LOGTDEBUG0,
1676 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1677 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1678 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1679 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1680 "nondisc" : "disconnecting");
1681 rls_lun_statep(isp, tptr);
/*
 * Fibre-channel ATIO2 handler: validate the firmware status, locate the lun
 * state (falling back to the wildcard lun or rejecting with CHECK CONDITION /
 * ILLEGAL REQUEST / LOGICAL UNIT NOT SUPPORTED), dequeue a pre-posted
 * accept-TIO CCB plus a private-data adjunct, fill both from the ATIO2 entry
 * and complete the CCB to CAM.
 * NOTE(review): fragmented listing — returns, braces and some locals are not
 * visible in this view.
 * FIX: line 1788 previously read ISPLOCK_2_CAMLOCK(siP) — 'siP' is not a
 * declared identifier (the softc is 'isp'); it only compiled where the macro
 * ignores its argument. Corrected to 'isp'.
 */
1686 isp_handle_platform_atio2(ispsoftc_t *isp, at2_entry_t *aep)
1690 struct ccb_accept_tio *atiop;
1691 atio_private_data_t *atp;
1694 * The firmware status (except for the QLTM_SVALID bit)
1695 * indicates why this ATIO was sent to us.
1697 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1699 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1700 isp_prt(isp, ISP_LOGWARN,
1701 "bogus atio (0x%x) leaked to platform", aep->at_status);
1702 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* SCC firmware carries a 16-bit lun in at_scclun. */
1706 if (FCPARAM(isp)->isp_sccfw) {
1707 lun = aep->at_scclun;
1711 tptr = get_lun_statep(isp, 0, lun);
1713 isp_prt(isp, ISP_LOGTDEBUG0,
1714 "[0x%x] no state pointer for lun %d", aep->at_rxid, lun);
1715 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
/* No state at all: reject with CHECK CONDITION, ILLEGAL REQUEST (0x5),
 * LOGICAL UNIT NOT SUPPORTED (0x25). */
1717 isp_endcmd(isp, aep,
1718 SCSI_STATUS_CHECK_COND | ECMD_SVALID |
1719 (0x5 << 12) | (0x25 << 16), 0);
/* Need both a free accept-TIO CCB and a free private-data adjunct. */
1724 atp = isp_get_atpd(isp, 0);
1725 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1726 if (atiop == NULL || atp == NULL) {
1729 * Because we can't autofeed sense data back with
1730 * a command for parallel SCSI, we can't give back
1731 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1732 * instead. This works out okay because the only time we
1733 * should, in fact, get this, is in the case that we've
1736 xpt_print(tptr->owner,
1737 "no %s for lun %d from initiator %d\n",
1738 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1739 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1740 rls_lun_statep(isp, tptr);
1741 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1744 atp->state = ATPD_STATE_ATIO;
1745 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1747 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO lun %d, count now %d",
1748 lun, tptr->atio_count);
/* Wildcard/default lun state: substitute our own loop id and the real lun. */
1750 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1751 atiop->ccb_h.target_id = FCPARAM(isp)->isp_loopid;
1752 atiop->ccb_h.target_lun = lun;
1755 * We don't get 'suggested' sense data as we do with SCSI cards.
1757 atiop->sense_len = 0;
1759 atiop->init_id = aep->at_iid;
1760 atiop->cdb_len = ATIO2_CDBLEN;
1761 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1762 atiop->ccb_h.status = CAM_CDB_RECVD;
/* The RX_ID doubles as CAM's tag for the life of this exchange. */
1763 atiop->tag_id = aep->at_rxid;
/* Map the FCP task attribute onto the corresponding SCSI queue tag msg. */
1764 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1765 case ATIO2_TC_ATTR_SIMPLEQ:
1766 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1768 case ATIO2_TC_ATTR_HEADOFQ:
1769 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1771 case ATIO2_TC_ATTR_ORDERED:
1772 atiop->tag_action = MSG_ORDERED_Q_TAG;
1774 case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1775 case ATIO2_TC_ATTR_UNTAGGED:
1777 atiop->tag_action = 0;
1780 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
/* Seed the adjunct for residual/transfer accounting in the CTIO path. */
1782 atp->tag = atiop->tag_id;
1784 atp->orig_datalen = aep->at_datalen;
1785 atp->last_xframt = 0;
1786 atp->bytes_xfered = 0;
1787 atp->state = ATPD_STATE_CAM;
1788 ISPLOCK_2_CAMLOCK(isp);
1789 xpt_done((union ccb*)atiop);
1791 isp_prt(isp, ISP_LOGTDEBUG0,
1792 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1793 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1794 lun, aep->at_taskflags, aep->at_datalen);
1795 rls_lun_statep(isp, tptr);
/*
 * Common completion handler for CTIO (parallel SCSI) and CTIO2 (fibre
 * channel) entries: recover the CCB from the syshandle, collect status,
 * sense and residual, then either push a fresh ATIO back (final CTIO) or
 * just log an intermediate completion.
 * NOTE(review): fragmented listing — locals ('ccb', 'tval'), braces and
 * returns are missing from this view.
 */
1800 isp_handle_platform_ctio(ispsoftc_t *isp, void *arg)
1803 int sentstatus, ok, notify_cam, resid = 0;
1807 * CTIO and CTIO2 are close enough....
/* ct_syshandle is at the same offset in both entry layouts. */
1810 ccb = isp_find_xs_tgt(isp, ((ct_entry_t *)arg)->ct_syshandle);
1811 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1812 isp_destroy_tgt_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
/* Fibre channel (CTIO2) leg. */
1815 ct2_entry_t *ct = arg;
1816 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1818 isp_prt(isp, ISP_LOGERR,
1819 "cannot find adjunct for %x after I/O",
1823 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1824 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1825 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1826 ccb->ccb_h.status |= CAM_SENT_SENSE;
/* Odd sequence number marks the CTIO whose completion CAM must see. */
1828 notify_cam = ct->ct_header.rqs_seqno & 0x1;
/* Update transfer accounting from the residual reported by the firmware. */
1829 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1830 resid = ct->ct_resid;
1831 atp->bytes_xfered += (atp->last_xframt - resid);
1832 atp->last_xframt = 0;
1834 if (sentstatus || !ok) {
1837 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1838 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1839 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1840 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1841 resid, sentstatus? "FIN" : "MID");
1844 /* XXX: should really come after isp_complete_ctio */
1845 atp->state = ATPD_STATE_PDON;
/* Parallel SCSI (CTIO) leg. */
1847 ct_entry_t *ct = arg;
1848 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1849 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1851 * We *ought* to be able to get back to the original ATIO
1852 * here, but for some reason this gets lost. It's just as
1853 * well because it's squirrelled away as part of periph
1856 * We can live without it as long as we continue to use
1857 * the auto-replenish feature for CTIOs.
1859 notify_cam = ct->ct_header.rqs_seqno & 0x1;
/* Firmware-suggested sense data lives at a fixed offset in the entry. */
1860 if (ct->ct_status & QLTM_SVALID) {
1861 char *sp = (char *)ct;
1862 sp += CTIO_SENSE_OFFSET;
1863 ccb->csio.sense_len =
1864 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1865 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1866 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1868 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1869 resid = ct->ct_resid;
1871 isp_prt(isp, ISP_LOGTDEBUG0,
1872 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1873 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1874 ct->ct_status, ct->ct_flags, resid,
1875 sentstatus? "FIN" : "MID");
1876 tval = ct->ct_fwhandle;
/* Accumulate (not assign): multiple CTIOs may report partial residuals. */
1878 ccb->csio.resid += resid;
1881 * We're here either because intermediate data transfers are done
1882 * and/or the final status CTIO (which may have joined with a
1883 * Data Transfer) is done.
1885 * In any case, for this platform, the upper layers figure out
1886 * what to do next, so all we do here is collect status and
1887 * pass information along. Any DMA handles have already been
1890 if (notify_cam == 0) {
1891 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1895 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1896 (sentstatus)? " FINAL " : "MIDTERM ", tval);
/* Replenishment path completes the CCB itself via isp_complete_ctio. */
1899 isp_target_putback_atio(ccb);
1901 isp_complete_ctio(ccb);
/*
 * Parallel-SCSI immediate-notify handler — currently an unimplemented stub
 * (returns 0, per the author's XXXX marker).
 */
1908 isp_handle_platform_notify_scsi(ispsoftc_t *isp, in_entry_t *inp)
1910 return (0);	/* XXXX */
/*
 * Fibre-channel immediate-notify handler: logs port logout/change/global
 * logout events and, for an ABORT TASK (visible in the fragment below),
 * converts the notification into a CAM immediate-notify CCB carrying a
 * MSG_ABORT_TAG message. NOTE(review): fragmented listing — the switch's
 * case label for the abort path, breaks and the return are not visible.
 */
1914 isp_handle_platform_notify_fc(ispsoftc_t *isp, in_fcentry_t *inp)
1917 switch (inp->in_status) {
1918 case IN_PORT_LOGOUT:
1919 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1922 case IN_PORT_CHANGED:
1923 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1926 case IN_GLOBAL_LOGO:
1927 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
/* Abort-task path: find the adjunct for the aborted exchange by RX_ID. */
1931 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1932 struct ccb_immed_notify *inot = NULL;
1935 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1937 inot = (struct ccb_immed_notify *)
1938 SLIST_FIRST(&tptr->inots);
1941 SLIST_REMOVE_HEAD(&tptr->inots,
1943 isp_prt(isp, ISP_LOGTDEBUG0,
1944 "Take FREE INOT count now %d",
1948 isp_prt(isp, ISP_LOGWARN,
1949 "abort task RX_ID %x IID %d state %d",
1950 inp->in_seqid, inp->in_iid, atp->state);
1952 isp_prt(isp, ISP_LOGWARN,
1953 "abort task RX_ID %x from iid %d, state unknown",
1954 inp->in_seqid, inp->in_iid);
/* Deliver MSG_ABORT_TAG with the 16-bit RX_ID split across two bytes. */
1957 inot->initiator_id = inp->in_iid;
1958 inot->sense_len = 0;
1959 inot->message_args[0] = MSG_ABORT_TAG;
1960 inot->message_args[1] = inp->in_seqid & 0xff;
1961 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1962 inot->ccb_h.status = CAM_MESSAGE_RECV;
1963 xpt_done((union ccb *)inot);
/*
 * CAM async-event callback. For AC_LOST_DEVICE on parallel SCSI (visible
 * leg below) it resets the target's goal negotiation flags to safe/NVRAM
 * defaults and pushes an update to the chip; other codes are just logged.
 * NOTE(review): fragmented listing — the switch statement itself, braces
 * and some locals ('tgt', 'isp' declaration) are not visible.
 */
1975 isp_cam_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg)
1977 struct cam_sim *sim;
1980 sim = (struct cam_sim *)cbarg;
1981 isp = (ispsoftc_t *) cam_sim_softc(sim);
1983 case AC_LOST_DEVICE:
1985 uint16_t oflags, nflags;
1986 sdparam *sdp = isp->isp_param;
1989 tgt = xpt_path_target_id(path);
/* sdparam is per-bus; index by the SIM's bus number. */
1991 sdp += cam_sim_bus(sim);
1993 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1994 #ifndef ISP_TARGET_MODE
/* Without target mode, fall back to narrow/async-safe defaults. */
1995 nflags &= DPARM_SAFE_DFLT;
1996 if (isp->isp_loaded_fw) {
1997 nflags |= DPARM_NARROW | DPARM_ASYNC;
2000 nflags = DPARM_DEFAULT;
/* Temporarily swap goal flags, push the update, then restore them. */
2002 oflags = sdp->isp_devparam[tgt].goal_flags;
2003 sdp->isp_devparam[tgt].goal_flags = nflags;
2004 sdp->isp_devparam[tgt].dev_update = 1;
2005 isp->isp_update |= (1 << cam_sim_bus(sim));
2006 (void) isp_control(isp,
2007 ISPCTL_UPDATE_PARAMS, NULL);
2008 sdp->isp_devparam[tgt].goal_flags = oflags;
2014 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
/*
 * CAM SIM poll entry point: read the interrupt status registers once and,
 * if an interrupt is pending, run the normal interrupt service routine.
 * NOTE(review): fragmented listing ('isr' declaration not visible).
 */
2020 isp_poll(struct cam_sim *sim)
2022 ispsoftc_t *isp = cam_sim_softc(sim);
2024 uint16_t sema, mbox;
2027 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2028 isp_intr(isp, isr, sema, mbox);
/* Forward declaration; definition follows. */
2034 static int isp_watchdog_work(ispsoftc_t *, XS_T *);
/*
 * Per-instance watchdog for a timed-out command. Re-checks that the command
 * is still outstanding (it may have completed, or a recursive watchdog may
 * be running), drains any pending interrupt, and either completes the
 * command, aborts-then-kills it after a grace period, or re-arms the timer.
 * NOTE(review): fragmented listing — 'handle' declaration, some braces,
 * return statements and the grace-period marking are not visible.
 */
2037 isp_watchdog_work(ispsoftc_t *isp, XS_T *xs)
2042 * We've decided this command is dead. Make sure we're not trying
2043 * to kill a command that's already dead by getting its handle
2044 * and seeing whether it's still alive.
2047 handle = isp_find_handle(isp, xs)
2050 uint16_t sema, mbox;
2052 if (XS_CMD_DONE_P(xs)) {
2053 isp_prt(isp, ISP_LOGDEBUG1,
2054 "watchdog found done cmd (handle 0x%x)", handle);
2059 if (XS_CMD_WDOG_P(xs)) {
2060 isp_prt(isp, ISP_LOGDEBUG2,
2061 "recursive watchdog (handle 0x%x)", handle);
/* Give a pending interrupt a chance to complete the command first. */
2067 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2068 isp_intr(isp, isr, sema, mbox);
2070 if (XS_CMD_DONE_P(xs)) {
2071 isp_prt(isp, ISP_LOGDEBUG2,
2072 "watchdog cleanup for handle 0x%x", handle);
2073 ISPLOCK_2_CAMLOCK(isp);
2074 xpt_done((union ccb *) xs);
2075 CAMLOCK_2_ISPLOCK(isp);
2076 } else if (XS_CMD_GRACE_P(xs)) {
2078 * Make sure the command is *really* dead before we
2079 * release the handle (and DMA resources) for reuse.
2081 (void) isp_control(isp, ISPCTL_ABORT_CMD, xs);
2084 * After this point, the command is really dead.
2086 if (XS_XFRLEN(xs)) {
2087 ISP_DMAFREE(isp, xs, handle);
2089 isp_destroy_handle(isp, handle);
2090 xpt_print(xs->ccb_h.path,
2091 "watchdog timeout for handle 0x%x\n", handle);
2092 XS_SETERR(xs, CAM_CMD_TIMEOUT);
2094 ISPLOCK_2_CAMLOCK(isp);
2096 CAMLOCK_2_ISPLOCK(isp);
/* Not yet in grace: re-arm the watchdog for another second. */
2099 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
2101 isp->isp_sendmarker |= 1 << XS_CHANNEL(xs);
/*
 * Global watchdog timeout(9) entry: walk every attached isp instance until
 * one claims the timed-out command ('arg'); complain if nobody does.
 * NOTE(review): fragmented listing — 'isp'/'xs' declarations, the cast of
 * arg, braces and the break are not visible.
 */
2111 isp_watchdog(void *arg)
2115 for (isp = isplist; isp != NULL; isp = isp->isp_osinfo.next) {
2116 if (isp_watchdog_work(isp, xs)) {
2121 printf("isp_watchdog: nobody had %p active\n", arg);
/*
 * Device arrival/departure announcement. On FreeBSD 5+ the isp_make_here/
 * isp_make_gone macros funnel into isp_announce(), which posts an async
 * event on a temporary path for (target, any lun); on older releases the
 * macros are no-ops.
 */
2126 #if __FreeBSD_version >= 500000
2127 #define isp_make_here(isp, tgt) isp_announce(isp, tgt, AC_FOUND_DEVICE)
2128 #define isp_make_gone(isp, tgt) isp_announce(isp, tgt, AC_LOST_DEVICE)
2131 * Support function for Announcement
/* 'action' is AC_FOUND_DEVICE or AC_LOST_DEVICE (per the macros above). */
2134 isp_announce(ispsoftc_t *isp, int tgt, int action)
2136 struct cam_path *tmppath;
2137 ISPLOCK_2_CAMLOCK(isp);
2138 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim), tgt,
2139 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2140 xpt_async(action, tmppath, NULL);
2141 xpt_free_path(tmppath);
2143 CAMLOCK_2_ISPLOCK(isp);
/* Pre-5.x: announcements are not supported. */
2146 #define isp_make_here(isp, tgt) do { ; } while (0)
2147 #define isp_make_gone(isp, tgt) do { ; } while (0)
2152 * Gone Device Timer Function- when we have decided that a device has gone
2153 * away, we wait a specific period of time prior to telling the OS it has
2156 * This timer function fires once a second and then scans the port database
2157 * for devices that are marked dead but still have a virtual target assigned.
2158 * We decrement a counter for that port database entry, and when it hits zero,
2159 * we tell the OS the device has gone away.
/* NOTE(review): fragmented listing — the isp_gdt() signature line itself
 * and several braces/continues are not visible in this view. */
2164 ispsoftc_t *isp = arg;
2166 int dbidx, tgt, more_to_do = 0;
2168 isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
2170 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2171 lp = &FCPARAM(isp)->portdb[dbidx];
/* Only zombies with a still-assigned virtual target are of interest. */
2173 if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2176 if (lp->ini_map_idx == 0) {
2179 if (lp->new_reserved == 0) {
/* Countdown not yet expired: keep ticking. */
2182 lp->new_reserved -= 1;
2183 if (lp->new_reserved != 0) {
/* ini_map_idx is 1-based; 0 means unassigned. */
2187 tgt = lp->ini_map_idx - 1;
2188 FCPARAM(isp)->isp_ini_map[tgt] = 0;
2189 lp->ini_map_idx = 0;
2190 lp->state = FC_PORTDB_STATE_NIL;
2191 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
2192 "Gone Device Timeout");
2193 isp_make_gone(isp, tgt);
/* Re-arm once a second while any entry is still counting down. */
2196 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz);
2198 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2199 "stopping Gone Device Timer");
2200 isp->isp_osinfo.gdt_running = 0;
2206 * Loop Down Timer Function- when loop goes down, a timer is started and
2207 * after it expires we come here and take all probational devices that
2208 * the OS knows about and tell the OS that they've gone away.
2210 * We don't clear the devices out of our port database because, when loop
2211 * comes back up, we have to do some actual cleanup with the chip at that
2212 * point (implicit PLOGO, e.g., to get the chip's port database state right).
/* NOTE(review): fragmented listing — the isp_ldt() signature line itself
 * and several braces/continues are not visible in this view. */
2217 ispsoftc_t *isp = arg;
2221 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
2225 * Notify to the OS all targets who we now consider have departed.
2227 for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2228 lp = &FCPARAM(isp)->portdb[dbidx];
2230 if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
2233 if (lp->ini_map_idx == 0) {
2238 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
2242 * Mark that we've announced that this device is gone....
2247 * but *don't* change the state of the entry. Just clear
2248 * any target id stuff and announce to CAM that the
2249 * device is gone. This way any necessary PLOGO stuff
2250 * will happen when loop comes back up.
/* ini_map_idx is 1-based; 0 means unassigned. */
2253 tgt = lp->ini_map_idx - 1;
2254 FCPARAM(isp)->isp_ini_map[tgt] = 0;
2255 lp->ini_map_idx = 0;
2256 isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
2257 "Loop Down Timeout");
2258 isp_make_gone(isp, tgt);
2262 * The loop down timer has expired. Wake up the kthread
2263 * to notice that fact (or make it false).
2265 isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
/* Wakeup mechanism differs by FreeBSD version (tsleep vs. cv). */
2266 #if __FreeBSD_version < 500000
2267 wakeup(&isp->isp_osinfo.kproc);
2270 cv_signal(&isp->isp_osinfo.kthread_cv);
2272 wakeup(&isp->isp_osinfo.kthread_cv);
/*
 * Fibre-channel state-maintenance kernel thread: loops forever evaluating
 * FC loop/fabric state via isp_fc_runstate(), tracking cumulative loop-down
 * time against quickboot/loop-down limits, freezing or releasing the simq
 * accordingly, and sleeping with a back-off interval ('slp') between checks.
 * NOTE(review): fragmented listing — outer loop construct, 'slp' declaration
 * and some braces/else arms are not visible in this view.
 */
2279 isp_kthread(void *arg)
2281 ispsoftc_t *isp = arg;
2283 #if __FreeBSD_version < 500000
2289 mtx_lock(&isp->isp_lock);
2295 * The first loop is for our usage where we have yet to have
2296 * gotten good fibre channel state.
2299 int wasfrozen, lb, lim;
2301 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2302 "isp_kthread: checking FC state");
/* Allow mailbox commands to sleep only while we run the FC state machine. */
2303 isp->isp_osinfo.mbox_sleep_ok = 1;
2304 lb = isp_fc_runstate(isp, 250000);
2305 isp->isp_osinfo.mbox_sleep_ok = 0;
2308 * Increment loop down time by the last sleep interval
2310 isp->isp_osinfo.loop_down_time += slp;
2313 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2314 "kthread: FC loop not up (down count %d)",
2315 isp->isp_osinfo.loop_down_time);
2317 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2318 "kthread: FC got to %d (down count %d)",
2319 lb, isp->isp_osinfo.loop_down_time);
2324 * If we've never seen loop up and we've waited longer
2325 * than quickboot time, or we've seen loop up but we've
2326 * waited longer than loop_down_limit, give up and go
2327 * to sleep until loop comes up.
2329 if (FCPARAM(isp)->loop_seen_once == 0) {
2330 lim = isp_quickboot_time;
2332 lim = isp->isp_osinfo.loop_down_limit;
2334 if (isp->isp_osinfo.loop_down_time >= lim) {
2335 isp_freeze_loopdown(isp, "loop limit hit");
/* Back-off ladder: check more often early in an outage (missing
 * 'slp = N' assignment lines between these thresholds). */
2337 } else if (isp->isp_osinfo.loop_down_time < 10) {
2339 } else if (isp->isp_osinfo.loop_down_time < 30) {
2341 } else if (isp->isp_osinfo.loop_down_time < 60) {
2343 } else if (isp->isp_osinfo.loop_down_time < 120) {
2350 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2351 "isp_kthread: FC state OK");
2352 isp->isp_osinfo.loop_down_time = 0;
2357 * If we'd frozen the simq, unfreeze it now so that CAM
2358 * can start sending us commands. If the FC state isn't
2359 * okay yet, they'll hit that in isp_start which will
2360 * freeze the queue again.
2362 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2363 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2364 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2365 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2366 "isp_kthread: releasing simq");
2367 ISPLOCK_2_CAMLOCK(isp);
2368 xpt_release_simq(isp->isp_sim, 1);
2369 CAMLOCK_2_ISPLOCK(isp);
2371 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2372 "isp_kthread: sleep time %d", slp);
/* Sleep/wait primitive differs by FreeBSD version. */
2373 #if __FreeBSD_version < 500000
2374 tsleep(&isp->isp_osinfo.kproc, PRIBIO, "ispf",
2378 cv_timed_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock,
2381 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "ispf",
2386 * If slp is zero, we're waking up for the first time after
2387 * things have been okay. In this case, we set a deferral state
2388 * for all commands and delay hysteresis seconds before starting
2389 * the FC state evaluation. This gives the loop/fabric a chance
2392 if (slp == 0 && isp->isp_osinfo.hysteresis) {
2393 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
2394 "isp_kthread: sleep hysteresis tick time %d",
2395 isp->isp_osinfo.hysteresis * hz);
2396 (void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
2397 (isp->isp_osinfo.hysteresis * hz));
2403 isp_action(struct cam_sim *sim, union ccb *ccb)
2405 int bus, tgt, error, lim;
2407 struct ccb_trans_settings *cts;
2409 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2411 isp = (ispsoftc_t *)cam_sim_softc(sim);
2412 ccb->ccb_h.sim_priv.entries[0].field = 0;
2413 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2414 if (isp->isp_state != ISP_RUNSTATE &&
2415 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2416 CAMLOCK_2_ISPLOCK(isp);
2418 if (isp->isp_state != ISP_INITSTATE) {
2421 * Lie. Say it was a selection timeout.
2423 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2424 xpt_freeze_devq(ccb->ccb_h.path, 1);
2428 isp->isp_state = ISP_RUNSTATE;
2429 ISPLOCK_2_CAMLOCK(isp);
2431 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2434 switch (ccb->ccb_h.func_code) {
2435 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2437 * Do a couple of preliminary checks...
2439 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2440 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2441 ccb->ccb_h.status = CAM_REQ_INVALID;
2447 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2448 xpt_print(ccb->ccb_h.path, "invalid target\n");
2449 ccb->ccb_h.status = CAM_PATH_INVALID;
2450 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2451 xpt_print(ccb->ccb_h.path, "invalid lun\n");
2452 ccb->ccb_h.status = CAM_PATH_INVALID;
2454 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2459 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2460 CAMLOCK_2_ISPLOCK(isp);
2461 error = isp_start((XS_T *) ccb);
2464 XS_CMD_S_CLEAR(ccb);
2465 ISPLOCK_2_CAMLOCK(isp);
2466 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2467 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2468 int ms = ccb->ccb_h.timeout;
2469 if (ms == CAM_TIME_DEFAULT) {
2472 ccb->ccb_h.timeout_ch =
2473 timeout(isp_watchdog, ccb, isp_mstohz(ms));
2475 callout_handle_init(&ccb->ccb_h.timeout_ch);
2480 * This can only happen for Fibre Channel
2482 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2485 * Handle initial and subsequent loop down cases
2487 if (FCPARAM(isp)->loop_seen_once == 0) {
2488 lim = isp_quickboot_time;
2490 lim = isp->isp_osinfo.loop_down_limit;
2492 if (isp->isp_osinfo.loop_down_time >= lim) {
2493 isp_prt(isp, ISP_LOGDEBUG0,
2494 "%d.%d downtime (%d) > lim (%d)",
2495 XS_TGT(ccb), XS_LUN(ccb),
2496 isp->isp_osinfo.loop_down_time, lim);
2498 CAM_SEL_TIMEOUT|CAM_DEV_QFRZN;
2499 xpt_freeze_devq(ccb->ccb_h.path, 1);
2500 ISPLOCK_2_CAMLOCK(isp);
2504 isp_prt(isp, ISP_LOGDEBUG0,
2505 "%d.%d retry later", XS_TGT(ccb), XS_LUN(ccb));
2507 * Otherwise, retry in a while.
2509 ISPLOCK_2_CAMLOCK(isp);
2510 cam_freeze_devq(ccb->ccb_h.path);
2511 cam_release_devq(ccb->ccb_h.path,
2512 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2513 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2517 ISPLOCK_2_CAMLOCK(isp);
2518 cam_freeze_devq(ccb->ccb_h.path);
2519 cam_release_devq(ccb->ccb_h.path,
2520 RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2524 isp_done((struct ccb_scsiio *) ccb);
2525 ISPLOCK_2_CAMLOCK(isp);
2528 ISPLOCK_2_CAMLOCK(isp);
2529 isp_prt(isp, ISP_LOGERR,
2530 "What's this? 0x%x at %d in file %s",
2531 error, __LINE__, __FILE__);
2532 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2537 #ifdef ISP_TARGET_MODE
2538 case XPT_EN_LUN: /* Enable LUN as a target */
2541 CAMLOCK_2_ISPLOCK(isp);
2542 seq = isp_en_lun(isp, ccb);
2544 ISPLOCK_2_CAMLOCK(isp);
2548 for (i = 0; isp->isp_osinfo.leact[seq] && i < 30 * 1000; i++) {
2550 uint16_t sema, mbox;
2551 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
2552 isp_intr(isp, isr, sema, mbox);
2556 ISPLOCK_2_CAMLOCK(isp);
2559 case XPT_NOTIFY_ACK: /* recycle notify ack */
2560 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
2561 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
2564 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2566 ccb->ccb_h.status = CAM_LUN_INVALID;
2570 ccb->ccb_h.sim_priv.entries[0].field = 0;
2571 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2572 ccb->ccb_h.flags = 0;
2574 CAMLOCK_2_ISPLOCK(isp);
2575 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2577 * Note that the command itself may not be done-
2578 * it may not even have had the first CTIO sent.
2581 isp_prt(isp, ISP_LOGTDEBUG0,
2582 "Put FREE ATIO, lun %d, count now %d",
2583 ccb->ccb_h.target_lun, tptr->atio_count);
2584 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2586 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2588 isp_prt(isp, ISP_LOGTDEBUG0,
2589 "Put FREE INOT, lun %d, count now %d",
2590 ccb->ccb_h.target_lun, tptr->inot_count);
2591 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2594 isp_prt(isp, ISP_LOGWARN, "Got Notify ACK");;
2596 rls_lun_statep(isp, tptr);
2597 ccb->ccb_h.status = CAM_REQ_INPROG;
2598 ISPLOCK_2_CAMLOCK(isp);
2601 case XPT_CONT_TARGET_IO:
2603 CAMLOCK_2_ISPLOCK(isp);
2604 isp_target_start_ctio(isp, ccb);
2605 ISPLOCK_2_CAMLOCK(isp);
2609 case XPT_RESET_DEV: /* BDR the specified SCSI device */
2611 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2612 tgt = ccb->ccb_h.target_id;
2615 CAMLOCK_2_ISPLOCK(isp);
2616 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2617 ISPLOCK_2_CAMLOCK(isp);
2619 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2621 ccb->ccb_h.status = CAM_REQ_CMP;
2625 case XPT_ABORT: /* Abort the specified CCB */
2627 union ccb *accb = ccb->cab.abort_ccb;
2628 CAMLOCK_2_ISPLOCK(isp);
2629 switch (accb->ccb_h.func_code) {
2630 #ifdef ISP_TARGET_MODE
2631 case XPT_ACCEPT_TARGET_IO:
2632 case XPT_IMMED_NOTIFY:
2633 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2635 case XPT_CONT_TARGET_IO:
2636 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2637 ccb->ccb_h.status = CAM_UA_ABORT;
2641 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2643 ccb->ccb_h.status = CAM_UA_ABORT;
2645 ccb->ccb_h.status = CAM_REQ_CMP;
2649 ccb->ccb_h.status = CAM_REQ_INVALID;
2652 ISPLOCK_2_CAMLOCK(isp);
2656 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
2657 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
2659 if (!IS_CURRENT_SETTINGS(cts)) {
2660 ccb->ccb_h.status = CAM_REQ_INVALID;
2664 tgt = cts->ccb_h.target_id;
2665 CAMLOCK_2_ISPLOCK(isp);
2667 struct ccb_trans_settings_scsi *scsi =
2668 &cts->proto_specific.scsi;
2669 struct ccb_trans_settings_spi *spi =
2670 &cts->xport_specific.spi;
2671 sdparam *sdp = isp->isp_param;
2674 if (spi->valid == 0 && scsi->valid == 0) {
2675 ISPLOCK_2_CAMLOCK(isp);
2676 ccb->ccb_h.status = CAM_REQ_CMP;
2681 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2684 * We always update (internally) from goal_flags
2685 * so any request to change settings just gets
2686 * vectored to that location.
2688 dptr = &sdp->isp_devparam[tgt].goal_flags;
2690 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2691 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2692 *dptr |= DPARM_DISC;
2694 *dptr &= ~DPARM_DISC;
2697 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2698 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2699 *dptr |= DPARM_TQING;
2701 *dptr &= ~DPARM_TQING;
2704 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2705 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2706 *dptr |= DPARM_WIDE;
2708 *dptr &= ~DPARM_WIDE;
2714 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2715 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2716 (spi->sync_period && spi->sync_offset)) {
2717 *dptr |= DPARM_SYNC;
2719 * XXX: CHECK FOR LEGALITY
2721 sdp->isp_devparam[tgt].goal_period =
2723 sdp->isp_devparam[tgt].goal_offset =
2726 *dptr &= ~DPARM_SYNC;
2728 isp_prt(isp, ISP_LOGDEBUG0,
2729 "SET (%d.%d.%d) to flags %x off %x per %x",
2730 bus, tgt, cts->ccb_h.target_lun,
2731 sdp->isp_devparam[tgt].goal_flags,
2732 sdp->isp_devparam[tgt].goal_offset,
2733 sdp->isp_devparam[tgt].goal_period);
2734 sdp->isp_devparam[tgt].dev_update = 1;
2735 isp->isp_update |= (1 << bus);
2737 ISPLOCK_2_CAMLOCK(isp);
2738 ccb->ccb_h.status = CAM_REQ_CMP;
2741 case XPT_GET_TRAN_SETTINGS:
2743 tgt = cts->ccb_h.target_id;
2744 CAMLOCK_2_ISPLOCK(isp);
2746 fcparam *fcp = isp->isp_param;
2747 struct ccb_trans_settings_scsi *scsi =
2748 &cts->proto_specific.scsi;
2749 struct ccb_trans_settings_fc *fc =
2750 &cts->xport_specific.fc;
2752 cts->protocol = PROTO_SCSI;
2753 cts->protocol_version = SCSI_REV_2;
2754 cts->transport = XPORT_FC;
2755 cts->transport_version = 0;
2757 scsi->valid = CTS_SCSI_VALID_TQ;
2758 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2759 fc->valid = CTS_FC_VALID_SPEED;
2760 if (fcp->isp_gbspeed == 2) {
2761 fc->bitrate = 200000;
2763 fc->bitrate = 100000;
2765 if (tgt > 0 && tgt < MAX_FC_TARG) {
2766 fcportdb_t *lp = &fcp->portdb[tgt];
2767 fc->wwnn = lp->node_wwn;
2768 fc->wwpn = lp->port_wwn;
2769 fc->port = lp->portid;
2770 fc->valid |= CTS_FC_VALID_WWNN |
2771 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2774 struct ccb_trans_settings_scsi *scsi =
2775 &cts->proto_specific.scsi;
2776 struct ccb_trans_settings_spi *spi =
2777 &cts->xport_specific.spi;
2778 sdparam *sdp = isp->isp_param;
2779 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2780 uint16_t dval, pval, oval;
2784 if (IS_CURRENT_SETTINGS(cts)) {
2785 sdp->isp_devparam[tgt].dev_refresh = 1;
2786 isp->isp_update |= (1 << bus);
2787 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2789 dval = sdp->isp_devparam[tgt].actv_flags;
2790 oval = sdp->isp_devparam[tgt].actv_offset;
2791 pval = sdp->isp_devparam[tgt].actv_period;
2793 dval = sdp->isp_devparam[tgt].nvrm_flags;
2794 oval = sdp->isp_devparam[tgt].nvrm_offset;
2795 pval = sdp->isp_devparam[tgt].nvrm_period;
2798 cts->protocol = PROTO_SCSI;
2799 cts->protocol_version = SCSI_REV_2;
2800 cts->transport = XPORT_SPI;
2801 cts->transport_version = 2;
2807 if (dval & DPARM_DISC) {
2808 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2810 if ((dval & DPARM_SYNC) && oval && pval) {
2811 spi->sync_offset = oval;
2812 spi->sync_period = pval;
2814 spi->sync_offset = 0;
2815 spi->sync_period = 0;
2817 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2818 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2819 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2820 if (dval & DPARM_WIDE) {
2821 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2823 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2825 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2826 scsi->valid = CTS_SCSI_VALID_TQ;
2827 if (dval & DPARM_TQING) {
2828 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2830 spi->valid |= CTS_SPI_VALID_DISC;
2832 isp_prt(isp, ISP_LOGDEBUG0,
2833 "GET %s (%d.%d.%d) to flags %x off %x per %x",
2834 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2835 bus, tgt, cts->ccb_h.target_lun, dval, oval, pval);
2837 ISPLOCK_2_CAMLOCK(isp);
2838 ccb->ccb_h.status = CAM_REQ_CMP;
2842 case XPT_CALC_GEOMETRY:
2843 #if __FreeBSD_version < 500000
2845 struct ccb_calc_geometry *ccg;
2846 u_int32_t secs_per_cylinder;
2850 if (ccg->block_size == 0) {
2851 ccb->ccb_h.status = CAM_REQ_INVALID;
2855 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2856 if (size_mb > 1024) {
2858 ccg->secs_per_track = 63;
2861 ccg->secs_per_track = 32;
2863 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2864 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2865 ccb->ccb_h.status = CAM_REQ_CMP;
2871 cam_calc_geometry(&ccb->ccg, /*extended*/1);
2876 case XPT_RESET_BUS: /* Reset the specified bus */
2877 bus = cam_sim_bus(sim);
2878 CAMLOCK_2_ISPLOCK(isp);
2879 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2880 ISPLOCK_2_CAMLOCK(isp);
2882 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2884 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2885 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2886 else if (isp->isp_path != NULL)
2887 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2888 ccb->ccb_h.status = CAM_REQ_CMP;
2893 case XPT_TERM_IO: /* Terminate the I/O process */
2894 ccb->ccb_h.status = CAM_REQ_INVALID;
2898 case XPT_PATH_INQ: /* Path routing inquiry */
2900 struct ccb_pathinq *cpi = &ccb->cpi;
2902 cpi->version_num = 1;
2903 #ifdef ISP_TARGET_MODE
2904 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2906 cpi->target_sprt = 0;
2908 cpi->hba_eng_cnt = 0;
2909 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2910 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2911 cpi->bus_id = cam_sim_bus(sim);
2913 cpi->hba_misc = PIM_NOBUSRESET;
2915 * Because our loop ID can shift from time to time,
2916 * make our initiator ID out of range of our bus.
2918 cpi->initiator_id = cpi->max_target + 1;
2921 * Set base transfer capabilities for Fibre Channel.
2922 * Technically not correct because we don't know
2923 * what media we're running on top of- but we'll
2924 * look good if we always say 100MB/s.
2926 if (FCPARAM(isp)->isp_gbspeed == 2)
2927 cpi->base_transfer_speed = 200000;
2929 cpi->base_transfer_speed = 100000;
2930 cpi->hba_inquiry = PI_TAG_ABLE;
2931 cpi->transport = XPORT_FC;
2932 cpi->transport_version = 0; /* WHAT'S THIS FOR? */
2934 sdparam *sdp = isp->isp_param;
2935 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2936 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2938 cpi->initiator_id = sdp->isp_initiator_id;
2939 cpi->base_transfer_speed = 3300;
2940 cpi->transport = XPORT_SPI;
2941 cpi->transport_version = 2; /* WHAT'S THIS FOR? */
2943 cpi->protocol = PROTO_SCSI;
2944 cpi->protocol_version = SCSI_REV_2;
2945 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2946 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2947 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2948 cpi->unit_number = cam_sim_unit(sim);
2949 cpi->ccb_h.status = CAM_REQ_CMP;
2954 ccb->ccb_h.status = CAM_REQ_INVALID;
2960 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * isp_done: completion path for a SCSI I/O CCB.
 *
 * Translates the raw SCSI status into the corresponding CAM status,
 * freezes the device queue on error so CAM can recover in order,
 * cancels the per-command watchdog, and returns the CCB to CAM.
 *
 * NOTE(review): this listing is elided (line numbers jump); some
 * enclosing braces/else arms are not visible here.
 */
2963 isp_done(struct ccb_scsiio *sccb)
2965 ispsoftc_t *isp = XS_ISP(sccb);
2968 XS_SETERR(sccb, CAM_REQ_CMP);
/*
 * Command completed at the CAM level but the target returned a
 * non-GOOD SCSI status: rewrite the CAM status bits accordingly.
 */
2970 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2971 (sccb->scsi_status != SCSI_STATUS_OK)) {
2972 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
/* CHECK CONDITION without valid autosense data -> autosense failure. */
2973 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2974 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2975 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2977 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2981 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/* On any unsuccessful completion, log and freeze the device queue once. */
2982 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2983 isp_prt(isp, ISP_LOGDEBUG0,
2984 "target %d lun %d CAM status 0x%x SCSI status 0x%x",
2985 XS_TGT(sccb), XS_LUN(sccb), sccb->ccb_h.status,
2987 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2988 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2989 xpt_freeze_devq(sccb->ccb_h.path, 1);
/* If CAM path debugging is enabled, report the final status. */
2993 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2994 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2995 xpt_print(sccb->ccb_h.path,
2996 "cam completion status 0x%x\n", sccb->ccb_h.status);
2999 XS_CMD_S_DONE(sccb);
/* Cancel the watchdog unless it is currently running for this command. */
3000 if (XS_CMD_WDOG_P(sccb) == 0) {
3001 untimeout(isp_watchdog, sccb, sccb->ccb_h.timeout_ch);
3002 if (XS_CMD_GRACE_P(sccb)) {
3003 isp_prt(isp, ISP_LOGDEBUG2,
3004 "finished command on borrowed time");
3006 XS_CMD_S_CLEAR(sccb);
/* Drop the ISP lock across xpt_done() to respect CAM lock ordering. */
3007 ISPLOCK_2_CAMLOCK(isp);
3008 xpt_done((union ccb *) sccb);
3009 CAMLOCK_2_ISPLOCK(isp);
/*
 * isp_async: platform handler for asynchronous events from the core
 * driver: negotiated-parameter changes, bus resets, FC loop state
 * transitions, port-database device arrival/change/departure, change
 * notifications, target-mode events, and firmware crashes.  Events are
 * translated into CAM async notifications and console log messages.
 *
 * FIX(review): in the ISPASYNC_NEW_TGT_PARAMS case, original line 3072
 * OR'd CTS_SPI_VALID_BUS_WIDTH (a ->valid mask bit) into spi->flags;
 * the equivalent XPT_GET_TRAN_SETTINGS code in this file (line 2819)
 * correctly sets spi->valid.  Changed spi->flags to spi->valid so the
 * reported bus width is actually marked valid.
 *
 * NOTE(review): this listing is elided (line numbers jump); break
 * statements, some braces, and #else/#endif lines are not visible here.
 */
3014 isp_async(ispsoftc_t *isp, ispasync_t cmd, void *arg)
3017 static const char prom[] =
3018 "PortID 0x%06x handle 0x%x role %s %s\n"
3019 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
3020 static const char prom2[] =
3021 "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
3022 " WWNN 0x%08x%08x WWPN 0x%08x%08x";
3026 struct cam_path *tmppath;
/*
 * New negotiated SPI parameters: build a ccb_trans_settings from the
 * active device parameters and broadcast AC_TRANSFER_NEG to CAM.
 */
3029 case ISPASYNC_NEW_TGT_PARAMS:
3031 struct ccb_trans_settings_scsi *scsi;
3032 struct ccb_trans_settings_spi *spi;
3034 sdparam *sdp = isp->isp_param;
3035 struct ccb_trans_settings cts;
3037 memset(&cts, 0, sizeof (struct ccb_trans_settings));
/* arg encodes bus in the high 16 bits and target in the low 16. */
3039 tgt = *((int *)arg);
3040 bus = (tgt >> 16) & 0xffff;
3043 ISPLOCK_2_CAMLOCK(isp);
3044 if (xpt_create_path(&tmppath, NULL,
3045 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
3046 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3047 CAMLOCK_2_ISPLOCK(isp);
3048 isp_prt(isp, ISP_LOGWARN,
3049 "isp_async cannot make temp path for %d.%d",
3054 CAMLOCK_2_ISPLOCK(isp);
3055 flags = sdp->isp_devparam[tgt].actv_flags;
3056 cts.type = CTS_TYPE_CURRENT_SETTINGS;
3057 cts.protocol = PROTO_SCSI;
3058 cts.transport = XPORT_SPI;
3060 scsi = &cts.proto_specific.scsi;
3061 spi = &cts.xport_specific.spi;
3063 if (flags & DPARM_TQING) {
3064 scsi->valid |= CTS_SCSI_VALID_TQ;
3065 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3068 if (flags & DPARM_DISC) {
3069 spi->valid |= CTS_SPI_VALID_DISC;
3070 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
/* Bus width is always reported; mark it valid (was spi->flags — bug). */
3072 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3073 if (flags & DPARM_WIDE) {
3074 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3076 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3078 if (flags & DPARM_SYNC) {
3079 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3080 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3081 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
3082 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
3084 isp_prt(isp, ISP_LOGDEBUG2,
3085 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
3086 bus, tgt, sdp->isp_devparam[tgt].actv_period,
3087 sdp->isp_devparam[tgt].actv_offset, flags);
3088 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
3089 ISPLOCK_2_CAMLOCK(isp);
3090 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
3091 xpt_free_path(tmppath);
3092 CAMLOCK_2_ISPLOCK(isp);
/* SCSI bus reset: forward AC_BUS_RESET on the affected bus's path. */
3095 case ISPASYNC_BUS_RESET:
3096 bus = *((int *)arg);
3097 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
3099 if (bus > 0 && isp->isp_path2) {
3100 ISPLOCK_2_CAMLOCK(isp);
3101 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
3102 CAMLOCK_2_ISPLOCK(isp);
3103 } else if (isp->isp_path) {
3104 ISPLOCK_2_CAMLOCK(isp);
3105 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
3106 CAMLOCK_2_ISPLOCK(isp);
3111 msg = "LIP Received";
3114 case ISPASYNC_LOOP_RESET:
/*
 * LIP / loop reset / loop down all funnel here: freeze the SIM queue
 * and arm the loop-down timer (isp_ldt) if it isn't already running.
 */
3119 case ISPASYNC_LOOP_DOWN:
3123 if (isp->isp_path) {
3124 isp_freeze_loopdown(isp, msg);
3126 if (isp->isp_osinfo.ldt_running == 0) {
3127 isp->isp_osinfo.ldt = timeout(isp_ldt, isp,
3128 isp->isp_osinfo.loop_down_limit * hz);
3129 isp->isp_osinfo.ldt_running = 1;
3130 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
3131 "starting Loop Down Timer");
3133 isp_prt(isp, ISP_LOGINFO, msg);
3135 case ISPASYNC_LOOP_UP:
3137 * Now we just note that Loop has come up. We don't
3138 * actually do anything because we're waiting for a
3139 * Change Notify before activating the FC cleanup
3140 * thread to look at the state of the loop again.
3142 isp_prt(isp, ISP_LOGINFO, "Loop UP");
/*
 * New FC device: if we're an initiator and it's a target, assign it a
 * free virtual target id (skipping reserved FL/SNS ids) and tell CAM.
 */
3144 case ISPASYNC_DEV_ARRIVED:
3147 if ((isp->isp_role & ISP_ROLE_INITIATOR) &&
3148 (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
3149 int dbidx = lp - FCPARAM(isp)->portdb;
3152 for (i = 0; i < MAX_FC_TARG; i++) {
3153 if (i >= FL_ID && i <= SNS_ID) {
3156 if (FCPARAM(isp)->isp_ini_map[i] == 0) {
3160 if (i < MAX_FC_TARG) {
3161 FCPARAM(isp)->isp_ini_map[i] = dbidx + 1;
3162 lp->ini_map_idx = i + 1;
3164 isp_prt(isp, ISP_LOGWARN, "out of target ids");
3165 isp_dump_portdb(isp);
3168 if (lp->ini_map_idx) {
3169 tgt = lp->ini_map_idx - 1;
3170 isp_prt(isp, ISP_LOGCONFIG, prom2,
3171 lp->portid, lp->handle,
3172 roles[lp->roles], "arrived at", tgt,
3173 (uint32_t) (lp->node_wwn >> 32),
3174 (uint32_t) lp->node_wwn,
3175 (uint32_t) (lp->port_wwn >> 32),
3176 (uint32_t) lp->port_wwn);
3177 isp_make_here(isp, tgt);
3179 isp_prt(isp, ISP_LOGCONFIG, prom,
3180 lp->portid, lp->handle,
3181 roles[lp->roles], "arrived",
3182 (uint32_t) (lp->node_wwn >> 32),
3183 (uint32_t) lp->node_wwn,
3184 (uint32_t) (lp->port_wwn >> 32),
3185 (uint32_t) lp->port_wwn);
/*
 * Device changed: if changes are treated as fatal (isp_change_is_bad),
 * tear down the mapping and announce departure; otherwise adopt the
 * new portid/roles and keep the existing virtual target mapping.
 */
3188 case ISPASYNC_DEV_CHANGED:
3190 if (isp_change_is_bad) {
3191 lp->state = FC_PORTDB_STATE_NIL;
3192 if (lp->ini_map_idx) {
3193 tgt = lp->ini_map_idx - 1;
3194 FCPARAM(isp)->isp_ini_map[tgt] = 0;
3195 lp->ini_map_idx = 0;
3196 isp_prt(isp, ISP_LOGCONFIG, prom3,
3197 lp->portid, tgt, "change is bad");
3198 isp_make_gone(isp, tgt);
3200 isp_prt(isp, ISP_LOGCONFIG, prom,
3201 lp->portid, lp->handle,
3203 "changed and departed",
3204 (uint32_t) (lp->node_wwn >> 32),
3205 (uint32_t) lp->node_wwn,
3206 (uint32_t) (lp->port_wwn >> 32),
3207 (uint32_t) lp->port_wwn);
3210 lp->portid = lp->new_portid;
3211 lp->roles = lp->new_roles;
3212 if (lp->ini_map_idx) {
3213 int t = lp->ini_map_idx - 1;
3214 FCPARAM(isp)->isp_ini_map[t] =
3215 (lp - FCPARAM(isp)->portdb) + 1;
3216 tgt = lp->ini_map_idx - 1;
3217 isp_prt(isp, ISP_LOGCONFIG, prom2,
3218 lp->portid, lp->handle,
3219 roles[lp->roles], "changed at", tgt,
3220 (uint32_t) (lp->node_wwn >> 32),
3221 (uint32_t) lp->node_wwn,
3222 (uint32_t) (lp->port_wwn >> 32),
3223 (uint32_t) lp->port_wwn);
3225 isp_prt(isp, ISP_LOGCONFIG, prom,
3226 lp->portid, lp->handle,
3227 roles[lp->roles], "changed",
3228 (uint32_t) (lp->node_wwn >> 32),
3229 (uint32_t) lp->node_wwn,
3230 (uint32_t) (lp->port_wwn >> 32),
3231 (uint32_t) lp->port_wwn);
/* Device unchanged after rescan: just log where it is. */
3235 case ISPASYNC_DEV_STAYED:
3237 if (lp->ini_map_idx) {
3238 tgt = lp->ini_map_idx - 1;
3239 isp_prt(isp, ISP_LOGCONFIG, prom2,
3240 lp->portid, lp->handle,
3241 roles[lp->roles], "stayed at", tgt,
3242 (uint32_t) (lp->node_wwn >> 32),
3243 (uint32_t) lp->node_wwn,
3244 (uint32_t) (lp->port_wwn >> 32),
3245 (uint32_t) lp->port_wwn);
3247 isp_prt(isp, ISP_LOGCONFIG, prom,
3248 lp->portid, lp->handle,
3249 roles[lp->roles], "stayed",
3250 (uint32_t) (lp->node_wwn >> 32),
3251 (uint32_t) lp->node_wwn,
3252 (uint32_t) (lp->port_wwn >> 32),
3253 (uint32_t) lp->port_wwn);
3256 case ISPASYNC_DEV_GONE:
3259 * If this has a virtual target and we haven't marked it
3260 * that we're going to have isp_gdt tell the OS it's gone,
3261 * set the isp_gdt timer running on it.
3263 * If it isn't marked that isp_gdt is going to get rid of it,
3264 * announce that it's gone.
3266 if (lp->ini_map_idx && lp->reserved == 0) {
3268 lp->new_reserved = isp->isp_osinfo.gone_device_time;
3269 lp->state = FC_PORTDB_STATE_ZOMBIE;
3270 if (isp->isp_osinfo.gdt_running == 0) {
3271 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
3272 "starting Gone Device Timer");
3273 isp->isp_osinfo.gdt = timeout(isp_gdt, isp, hz);
3274 isp->isp_osinfo.gdt_running = 1;
3276 tgt = lp->ini_map_idx - 1;
3277 isp_prt(isp, ISP_LOGCONFIG, prom2,
3278 lp->portid, lp->handle,
3279 roles[lp->roles], "gone zombie at", tgt,
3280 (uint32_t) (lp->node_wwn >> 32),
3281 (uint32_t) lp->node_wwn,
3282 (uint32_t) (lp->port_wwn >> 32),
3283 (uint32_t) lp->port_wwn);
3284 } else if (lp->reserved == 0) {
3285 isp_prt(isp, ISP_LOGCONFIG, prom,
3286 lp->portid, lp->handle,
3287 roles[lp->roles], "departed",
3288 (uint32_t) (lp->node_wwn >> 32),
3289 (uint32_t) lp->node_wwn,
3290 (uint32_t) (lp->port_wwn >> 32),
3291 (uint32_t) lp->port_wwn);
/*
 * Change notify (PDB/SNS): cancel any loop-down timer, freeze the
 * queue, and wake the kthread that re-evaluates FC state.
 */
3294 case ISPASYNC_CHANGE_NOTIFY:
3297 if (arg == ISPASYNC_CHANGE_PDB) {
3298 msg = "Port Database Changed";
3299 } else if (arg == ISPASYNC_CHANGE_SNS) {
3300 msg = "Name Server Database Changed";
3302 msg = "Other Change Notify";
3305 * If the loop down timer is running, cancel it.
3307 if (isp->isp_osinfo.ldt_running) {
3308 isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
3309 "Stopping Loop Down Timer");
3310 isp->isp_osinfo.ldt_running = 0;
3311 untimeout(isp_ldt, isp, isp->isp_osinfo.ldt);
3312 callout_handle_init(&isp->isp_osinfo.ldt);
3314 isp_prt(isp, ISP_LOGINFO, msg);
3315 isp_freeze_loopdown(isp, msg);
3316 #if __FreeBSD_version < 500000
3317 wakeup(&isp->isp_osinfo.kproc);
3320 cv_signal(&isp->isp_osinfo.kthread_cv);
3322 wakeup(&isp->isp_osinfo.kthread_cv);
3327 #ifdef ISP_TARGET_MODE
3328 case ISPASYNC_TARGET_NOTIFY:
3330 tmd_notify_t *nt = arg;
3331 isp_prt(isp, ISP_LOGALL,
3332 "target notify code 0x%x", nt->nt_ncode);
/* Target-mode action: dispatch on the response queue entry type. */
3335 case ISPASYNC_TARGET_ACTION:
3336 switch (((isphdr_t *)arg)->rqs_entry_type) {
3338 isp_prt(isp, ISP_LOGWARN,
3339 "event 0x%x for unhandled target action",
3340 ((isphdr_t *)arg)->rqs_entry_type);
3342 case RQSTYPE_NOTIFY:
3344 rv = isp_handle_platform_notify_scsi(isp,
3345 (in_entry_t *) arg);
3347 rv = isp_handle_platform_notify_fc(isp,
3348 (in_fcentry_t *) arg);
3352 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3355 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3360 rv = isp_handle_platform_ctio(isp, arg);
3362 case RQSTYPE_ENABLE_LUN:
3363 case RQSTYPE_MODIFY_LUN:
3364 isp_ledone(isp, (lun_entry_t *) arg);
/*
 * Firmware crash: report the RISC address from the mailbox registers
 * and (conditionally) prepare for a firmware dump/restart.
 */
3369 case ISPASYNC_FW_CRASH:
3371 uint16_t mbox1, mbox6;
3372 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3373 if (IS_DUALBUS(isp)) {
3374 mbox6 = ISP_READ(isp, OUTMAILBOX6);
3378 isp_prt(isp, ISP_LOGERR,
3379 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3381 #ifdef ISP_FW_CRASH_DUMP
3383 * XXX: really need a thread to do this right.
3386 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3387 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3388 isp_freeze_loopdown(isp, "f/w crash");
3392 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3396 case ISPASYNC_UNHANDLED_RESPONSE:
3399 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3407 * Locks are held before coming here.
/*
 * isp_uninit: final teardown — reset the RISC processor and mask
 * interrupts.  Locks are held on entry (see comment above).
 *
 * NOTE(review): the two ISP_WRITEs appear to be alternatives selected
 * by chip generation (2400-series vs. older HCCR layout); the #if/#else
 * lines are elided in this listing — confirm against the full source.
 */
3410 isp_uninit(ispsoftc_t *isp)
3413 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_RESET);
3415 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3417 ISP_DISABLE_INTS(isp);
/*
 * isp_prt: driver printf.  Messages are suppressed unless their level
 * is enabled in isp->isp_dblev; ISP_LOGALL always prints.  Output is
 * prefixed with the device name.  (The varargs formatting lines are
 * elided in this listing.)
 */
3421 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3424 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3427 printf("%s: ", device_get_nameunit(isp->isp_dev));
/*
 * isp_nanotime_sub: elapsed time between two timespecs, in nanoseconds.
 * Works on a local copy of *b so neither input is modified.
 *
 * NOTE(review): the actual subtraction of *a (timespecsub or
 * equivalent) is elided in this listing — presumably it occurs between
 * the copy and GET_NANOSEC; confirm against the full source.
 */
3435 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3438 struct timespec x = *b;
3440 elapsed = GET_NANOSEC(&x);
/*
 * isp_mbox_acquire: claim the single outstanding-mailbox-command slot.
 * If a command is already in flight, fail (the return statements are
 * elided in this listing — presumably nonzero on busy, zero on
 * success; confirm against callers).  On success, clear the completion
 * flag and mark the mailbox busy.
 */
3447 isp_mbox_acquire(ispsoftc_t *isp)
3449 if (isp->isp_osinfo.mboxbsy) {
3452 isp->isp_osinfo.mboxcmd_done = 0;
3453 isp->isp_osinfo.mboxbsy = 1;
/*
 * isp_mbox_wait_complete: wait for the current mailbox command to
 * finish, using one of two strategies:
 *
 *  - if sleeping is allowed (mbox_sleep_ok), block on isp_mbxworkp and
 *    let the interrupt handler wake us via isp_mbox_notify_done();
 *  - otherwise poll, servicing the interrupt status ourselves by
 *    calling isp_intr() until mboxcmd_done is set or time runs out.
 *
 * The timeout comes from mbp->timeout (default MBCMD_DEFAULT_TIMEOUT),
 * scaled up when a multi-step worker command (isp_mbxwrk0) is active.
 * On timeout, param[0] is forced to MBOX_TIMEOUT for the caller.
 *
 * NOTE(review): this listing is elided; the per-iteration delay in the
 * poll loop and several break statements are not visible here.
 */
3459 isp_mbox_wait_complete(ispsoftc_t *isp, mbreg_t *mbp)
3461 int usecs = mbp->timeout;
3465 usecs = MBCMD_DEFAULT_TIMEOUT;
/* Multi-step commands get proportionally more time. */
3467 if (isp->isp_mbxwrk0) {
3468 usecs *= isp->isp_mbxwrk0;
/* Sleeping path: temporarily clear sleep_ok so nested waits poll. */
3470 if (isp->isp_osinfo.mbox_sleep_ok) {
3471 int ms = usecs / 1000;
3472 isp->isp_osinfo.mbox_sleep_ok = 0;
3473 isp->isp_osinfo.mbox_sleeping = 1;
3474 #if __FreeBSD_version < 500000 || !defined(ISP_SMPLOCK)
3475 tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep",
3478 msleep(&isp->isp_mbxworkp, &isp->isp_mtx, PRIBIO,
3479 "ispmbx_sleep", isp_mstohz(ms));
3481 isp->isp_osinfo.mbox_sleep_ok = 1;
3482 isp->isp_osinfo.mbox_sleeping = 0;
/* Polling path: service interrupts ourselves in ~100us steps. */
3484 for (j = 0; j < usecs; j += 100) {
3486 uint16_t sema, mbox;
3487 if (isp->isp_osinfo.mboxcmd_done) {
3490 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
3491 isp_intr(isp, isr, sema, mbox);
3492 if (isp->isp_osinfo.mboxcmd_done) {
/* Timed out either way: report and synthesize a MBOX_TIMEOUT result. */
3499 if (isp->isp_osinfo.mboxcmd_done == 0) {
3500 isp_prt(isp, ISP_LOGWARN,
3501 "%s Mailbox Command (0x%x) Timeout",
3502 isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
3503 isp->isp_lastmbxcmd);
3504 mbp->param[0] = MBOX_TIMEOUT;
3505 isp->isp_osinfo.mboxcmd_done = 1;
/*
 * isp_mbox_notify_done: mark the current mailbox command complete and,
 * if the issuer is asleep in isp_mbox_wait_complete(), wake it.
 */
3510 isp_mbox_notify_done(ispsoftc_t *isp)
3512 if (isp->isp_osinfo.mbox_sleeping) {
3513 wakeup(&isp->isp_mbxworkp);
3515 isp->isp_osinfo.mboxcmd_done = 1;
/*
 * isp_mbox_release: release the mailbox-command slot claimed by
 * isp_mbox_acquire(), allowing the next command to proceed.
 */
3519 isp_mbox_release(ispsoftc_t *isp)
3521 isp->isp_osinfo.mboxbsy = 0;
3528 t.tv_sec = ms / 1000;
3529 t.tv_usec = (ms % 1000) * 1000;