2 * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice immediately at the beginning of the file, without modification,
11 * this list of conditions, and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <dev/isp/isp_freebsd.h>
32 #include <sys/unistd.h>
33 #include <sys/kthread.h>
34 #include <machine/stdarg.h> /* for use by isp_prt below */
36 #include <sys/module.h>
37 #include <sys/ioccom.h>
38 #include <dev/isp/isp_ioctl.h>
/*
 * Module-scope state and forward declarations.
 * NOTE(review): the embedded line numbers in this copy are non-contiguous,
 * so some declarations (e.g. the rest of the cdevsw initializer) are
 * missing from this view.
 */
41 MODULE_VERSION(isp, 1);
42 int isp_announced = 0;
/* Hook filled in by the firmware module, if loaded. */
43 ispfwfunc *isp_get_firmware_p = NULL;
45 static d_ioctl_t ispioctl;
46 static void isp_intr_enable(void *);
47 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
48 static void isp_poll(struct cam_sim *);
49 static timeout_t isp_watchdog;
50 static void isp_kthread(void *);
51 static void isp_action(struct cam_sim *, union ccb *);
/* Statically assigned character-device major for /dev/isp* nodes. */
54 #define ISP_CDEV_MAJOR 248
55 static struct cdevsw isp_cdevsw = {
60 .d_maj = ISP_CDEV_MAJOR,
/* Singly linked list of all attached isp instances (walked by ispioctl). */
64 static struct ispsoftc *isplist = NULL;
/*
 * isp_attach - platform attachment: allocate the CAM device queue and
 * SIM(s), register the CAM bus(es), create wildcard paths, hook async
 * (lost-device) callbacks, start the FC kernel thread, create the
 * control device node, and link the instance onto isplist.
 *
 * NOTE(review): this copy is missing interior lines (error returns,
 * closing braces, isp->isp_sim assignments), so the error-unwind
 * ordering cannot be fully audited here.
 */
67 isp_attach(struct ispsoftc *isp)
69 int primary, secondary;
70 struct ccb_setasync csa;
71 struct cam_devq *devq;
73 struct cam_path *path;
76 * Establish (in case of 12X0) which bus is the primary.
83 * Create the device queue for our SIM(s).
85 devq = cam_simq_alloc(isp->isp_maxcmds);
91 * Construct our SIM entry.
93 ISPLOCK_2_CAMLOCK(isp);
94 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
95 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
/* NOTE(review): two consecutive CAMLOCK_2_ISPLOCK below — likely an
 * artifact of the missing lines between them (an error path probably
 * sits in the gap); verify against the full source. */
98 CAMLOCK_2_ISPLOCK(isp);
101 CAMLOCK_2_ISPLOCK(isp);
/* Defer interrupt enabling until the boot-time config hook fires. */
103 isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
104 isp->isp_osinfo.ehook.ich_arg = isp;
105 ISPLOCK_2_CAMLOCK(isp);
106 if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
107 cam_sim_free(sim, TRUE);
108 CAMLOCK_2_ISPLOCK(isp);
109 isp_prt(isp, ISP_LOGERR,
110 "could not establish interrupt enable hook");
114 if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
115 cam_sim_free(sim, TRUE);
116 CAMLOCK_2_ISPLOCK(isp);
120 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
121 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
122 xpt_bus_deregister(cam_sim_path(sim));
123 cam_sim_free(sim, TRUE);
124 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
125 CAMLOCK_2_ISPLOCK(isp);
/* Register for lost-device async notifications on the wildcard path. */
129 xpt_setup_ccb(&csa.ccb_h, path, 5);
130 csa.ccb_h.func_code = XPT_SASYNC_CB;
131 csa.event_enable = AC_LOST_DEVICE;
132 csa.callback = isp_cam_async;
133 csa.callback_arg = sim;
134 xpt_action((union ccb *)&csa);
135 CAMLOCK_2_ISPLOCK(isp);
137 isp->isp_path = path;
139 * Create a kernel thread for fibre channel instances. We
140 * don't have dual channel FC cards.
143 ISPLOCK_2_CAMLOCK(isp);
144 /* XXX: LOCK VIOLATION */
145 cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
146 if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
147 RFHIGHPID, 0, "%s: fc_thrd",
148 device_get_nameunit(isp->isp_dev))) {
149 xpt_bus_deregister(cam_sim_path(sim));
150 cam_sim_free(sim, TRUE);
151 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
152 CAMLOCK_2_ISPLOCK(isp);
153 isp_prt(isp, ISP_LOGERR, "could not create kthread");
156 CAMLOCK_2_ISPLOCK(isp);
161 * If we have a second channel, construct SIM entry for that.
163 if (IS_DUALBUS(isp)) {
164 ISPLOCK_2_CAMLOCK(isp);
165 sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
166 device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
/* Second-channel failure paths must also tear down the first bus. */
168 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
169 xpt_free_path(isp->isp_path);
171 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
174 if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
175 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
176 xpt_free_path(isp->isp_path);
177 cam_sim_free(sim, TRUE);
178 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
179 CAMLOCK_2_ISPLOCK(isp);
183 if (xpt_create_path(&path, NULL, cam_sim_path(sim),
184 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
185 xpt_bus_deregister(cam_sim_path(isp->isp_sim));
186 xpt_free_path(isp->isp_path);
187 xpt_bus_deregister(cam_sim_path(sim));
188 cam_sim_free(sim, TRUE);
189 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
190 CAMLOCK_2_ISPLOCK(isp);
194 xpt_setup_ccb(&csa.ccb_h, path, 5);
195 csa.ccb_h.func_code = XPT_SASYNC_CB;
196 csa.event_enable = AC_LOST_DEVICE;
197 csa.callback = isp_cam_async;
198 csa.callback_arg = sim;
199 xpt_action((union ccb *)&csa);
200 CAMLOCK_2_ISPLOCK(isp);
202 isp->isp_path2 = path;
/* Per-bus condition variables used by the target-mode rendezvous code. */
205 #ifdef ISP_TARGET_MODE
206 cv_init(&isp->isp_osinfo.tgtcv0[0], "isp_tgcv0a");
207 cv_init(&isp->isp_osinfo.tgtcv0[1], "isp_tgcv0b");
208 cv_init(&isp->isp_osinfo.tgtcv1[0], "isp_tgcv1a");
209 cv_init(&isp->isp_osinfo.tgtcv1[1], "isp_tgcv1b");
212 * Create device nodes
214 (void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
215 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
217 if (isp->isp_role != ISP_ROLE_NONE) {
218 isp->isp_state = ISP_RUNSTATE;
/* Append this instance to the global isplist (tail insertion). */
221 if (isplist == NULL) {
224 struct ispsoftc *tmp = isplist;
225 while (tmp->isp_osinfo.next) {
226 tmp = tmp->isp_osinfo.next;
228 tmp->isp_osinfo.next = isp;
/*
 * isp_freeze_loopdown - freeze the SIM queue (once) while the FC loop is
 * down.  Only the first caller actually freezes; later callers merely OR
 * in the SIMQFRZ_LOOPDOWN reason flag.  "msg" identifies the caller for
 * the debug log.
 */
234 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
236 if (isp->isp_osinfo.simqfrozen == 0) {
237 isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
238 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
/* Drop to CAM lock context around the xpt call, then reacquire. */
239 ISPLOCK_2_CAMLOCK(isp);
240 xpt_freeze_simq(isp->isp_sim, 1);
241 CAMLOCK_2_ISPLOCK(isp);
/* Already frozen for some reason: just record the loopdown reason. */
243 isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
244 isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
/*
 * ispioctl - control-device ioctl entry point.  Locates the ispsoftc
 * whose unit matches the minor number, then dispatches on cmd: firmware
 * crash-dump retrieval, debug-level get/set, FC runstate/LIP control,
 * port-database and HBA info queries, statistics, and FC parameter
 * get/set.  Returns 0 or an errno (returns live in lines missing from
 * this copy).
 */
249 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
251 struct ispsoftc *isp;
/* Walk isplist for the instance matching this device's minor number. */
256 if (minor(dev) == device_get_unit(isp->isp_dev)) {
259 isp = isp->isp_osinfo.next;
265 #ifdef ISP_FW_CRASH_DUMP
266 case ISP_GET_FW_CRASH_DUMP:
268 u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
273 sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
275 sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
278 void *uaddr = *((void **) addr);
279 if (copyout(ptr, uaddr, sz)) {
291 case ISP_FORCE_CRASH_DUMP:
293 isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
/* ISP_SDBLEV: swap in the new debug level, hand back the old one. */
302 int olddblev = isp->isp_dblev;
303 isp->isp_dblev = *(int *)addr;
304 *(int *)addr = olddblev;
317 if (isp_fc_runstate(isp, 5 * 1000000)) {
328 if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
336 case ISP_FC_GETDINFO:
338 struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
341 if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
346 lp = &FCPARAM(isp)->portdb[ifc->loopid];
348 ifc->loopid = lp->loopid;
349 ifc->portid = lp->portid;
350 ifc->node_wwn = lp->node_wwn;
351 ifc->port_wwn = lp->port_wwn;
/* ISP_GET_STATS: snapshot the interrupt/queue counters. */
361 isp_stats_t *sp = (isp_stats_t *) addr;
363 MEMZERO(sp, sizeof (*sp));
364 sp->isp_stat_version = ISP_STATS_VERSION;
365 sp->isp_type = isp->isp_type;
366 sp->isp_revision = isp->isp_revision;
368 sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
369 sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
370 sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
/* NOTE(review): index name ISP_INGOASYNC vs. field isp_intoasync — the
 * spelling mismatch looks like a typo in the index macro; confirm
 * against isp_ioctl.h. */
371 sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
372 sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
373 sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
374 sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
375 sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
/* ISP_CLR_STATS: reset all counters. */
383 isp->isp_intbogus = 0;
384 isp->isp_intmboxc = 0;
385 isp->isp_intoasync = 0;
386 isp->isp_rsltccmplt = 0;
387 isp->isp_fphccmplt = 0;
388 isp->isp_rscchiwater = 0;
389 isp->isp_fpcchiwater = 0;
393 case ISP_FC_GETHINFO:
395 struct isp_hba_device *hba = (struct isp_hba_device *) addr;
396 MEMZERO(hba, sizeof (*hba));
398 hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
399 hba->fc_scsi_supported = 1;
400 hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
401 hba->fc_loopid = FCPARAM(isp)->isp_loopid;
402 hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
403 hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
408 case ISP_GET_FC_PARAM:
410 struct isp_fc_param *f = (struct isp_fc_param *) addr;
417 if (strcmp(f->param_name, "framelength") == 0) {
418 f->parameter = FCPARAM(isp)->isp_maxfrmlen;
422 if (strcmp(f->param_name, "exec_throttle") == 0) {
423 f->parameter = FCPARAM(isp)->isp_execthrottle;
427 if (strcmp(f->param_name, "fullduplex") == 0) {
428 if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
433 if (strcmp(f->param_name, "loopid") == 0) {
434 f->parameter = FCPARAM(isp)->isp_loopid;
441 case ISP_SET_FC_PARAM:
443 struct isp_fc_param *f = (struct isp_fc_param *) addr;
444 u_int32_t param = f->parameter;
/* BUG(review): the test below repeats 1024 twice; the third comparison
 * should be 2048 (valid FC frame lengths are 512/1024/2048), so 2048 is
 * wrongly rejected.  Cannot change code in a comment-only pass. */
451 if (strcmp(f->param_name, "framelength") == 0) {
452 if (param != 512 && param != 1024 && param != 1024) {
456 FCPARAM(isp)->isp_maxfrmlen = param;
460 if (strcmp(f->param_name, "exec_throttle") == 0) {
461 if (param < 16 || param > 255) {
465 FCPARAM(isp)->isp_execthrottle = param;
469 if (strcmp(f->param_name, "fullduplex") == 0) {
470 if (param != 0 && param != 1) {
475 FCPARAM(isp)->isp_fwoptions |=
478 FCPARAM(isp)->isp_fwoptions &=
/* NOTE(review): param is u_int32_t, so "param < 0" is always false —
 * the lower-bound check is dead code. */
484 if (strcmp(f->param_name, "loopid") == 0) {
485 if (param < 0 || param > 125) {
489 FCPARAM(isp)->isp_loopid = param;
/*
 * isp_intr_enable - boot-time config_intrhook callback: once the system
 * can take interrupts, mark them OK for this instance and release the
 * hook so boot can proceed.
 */
503 isp_intr_enable(void *arg)
505 struct ispsoftc *isp = arg;
506 if (isp->isp_role != ISP_ROLE_NONE) {
508 isp->isp_osinfo.intsok = 1;
510 /* Release our hook so that the boot can continue. */
511 config_intrhook_disestablish(&isp->isp_osinfo.ehook);
515 * Put the target mode functions here, because some are inlines
518 #ifdef ISP_TARGET_MODE
/* Forward declarations for the target-mode support below: lun-state
 * hash lookups, rendezvous semaphore/cv helpers, lun enable/disable,
 * CTIO/ATIO handling, and firmware-notification handlers. */
520 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
521 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
522 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
523 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
524 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
525 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
526 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
527 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
528 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
530 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
531 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
532 static void isp_en_lun(struct ispsoftc *, union ccb *);
533 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
534 static timeout_t isp_refire_putback_atio;
535 static void isp_complete_ctio(union ccb *);
536 static void isp_target_putback_atio(union ccb *);
537 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
538 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
539 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
540 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
541 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
542 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
/*
 * is_lun_enabled - walk the per-instance lun hash chain and report
 * whether a (bus, lun) pair already has a tstate entry.
 */
545 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
548 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
553 if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
556 } while ((tptr = tptr->next) != NULL);
/*
 * are_any_luns_enabled - scan the lun hash for any enabled lun; on
 * dual-bus adapters only the half of the hash belonging to "port" is
 * scanned (the table is split in two).
 */
561 are_any_luns_enabled(struct ispsoftc *isp, int port)
564 if (IS_DUALBUS(isp)) {
565 lo = (port * (LUN_HASH_SIZE >> 1));
566 hi = lo + (LUN_HASH_SIZE >> 1);
571 for (lo = 0; lo < hi; lo++) {
572 if (isp->isp_osinfo.lun_hash[lo]) {
/*
 * get_lun_statep - return the tstate for (bus, lun), taking a "hold"
 * reference (the hold increment is in lines missing from this copy);
 * CAM_LUN_WILDCARD maps to the per-bus default tstate when wildcard
 * mode is enabled.  Caller pairs with rls_lun_statep().
 */
579 static INLINE tstate_t *
580 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
582 tstate_t *tptr = NULL;
584 if (lun == CAM_LUN_WILDCARD) {
585 if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
586 tptr = &isp->isp_osinfo.tsdflt[bus];
591 tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
598 if (tptr->lun == lun && tptr->bus == bus) {
602 } while ((tptr = tptr->next) != NULL);
/* rls_lun_statep - drop the hold taken by get_lun_statep (body in
 * lines missing from this copy). */
607 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
/*
 * isp_psema_sig_rqe - P() on the per-bus TM_BUSY "semaphore": sleep
 * (interruptibly, via cv or tsleep depending on kernel version) until
 * the bus is free, then claim it.  Nonzero return means the sleep was
 * interrupted by a signal.
 */
614 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
616 while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
617 isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
619 if (cv_wait_sig(&isp->isp_osinfo.tgtcv0[bus], &isp->isp_lock)) {
623 if (tsleep(&isp->isp_osinfo.tgtcv0[bus], PZERO, "cv_isp", 0)) {
627 isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
/*
 * isp_cv_wait_timed_rqe - wait (bounded by timo ticks) for the firmware
 * enable/modify-lun response; nonzero return means timeout.
 */
633 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
636 if (cv_timedwait(&isp->isp_osinfo.tgtcv1[bus], &isp->isp_lock, timo)) {
640 if (tsleep(&isp->isp_osinfo.tgtcv1[bus], PZERO, "cv_isp1", 0)) {
/*
 * isp_cv_signal_rqe - record the firmware's lun-command status and wake
 * the waiter in isp_cv_wait_timed_rqe.
 */
648 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
650 isp->isp_osinfo.rstatus[bus] = status;
652 cv_signal(&isp->isp_osinfo.tgtcv1[bus]);
654 wakeup(&isp->isp_osinfo.tgtcv1[bus]);
/*
 * isp_vsema_rqe - V() on the per-bus semaphore: wake anyone flagged
 * TM_WANTED, then clear TM_BUSY.
 */
659 isp_vsema_rqe(struct ispsoftc *isp, int bus)
661 if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
662 isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
664 cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
666 cv_signal(&isp->isp_osinfo.tgtcv0[bus]);
669 isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
/*
 * isp_get_atpd - linear scan of the fixed ATIO private-data pool for
 * the slot matching "tag" (match/return lines are missing from this
 * copy).
 */
672 static INLINE atio_private_data_t *
673 isp_get_atpd(struct ispsoftc *isp, int tag)
675 atio_private_data_t *atp;
676 for (atp = isp->isp_osinfo.atpdp;
677 atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
/*
 * create_lun_state - allocate and register a tstate for the lun named
 * by "path": reject invalid/duplicate luns, allocate zeroed state,
 * clone the CAM path, init the ATIO/INOT lists, and insert at the head
 * of the lun hash chain.  Returns a CAM status; *rslt receives the new
 * entry (assignment is in lines missing from this copy).
 */
685 create_lun_state(struct ispsoftc *isp, int bus,
686 struct cam_path *path, tstate_t **rslt)
691 tstate_t *tptr, *new;
693 lun = xpt_path_lun_id(path);
695 return (CAM_LUN_INVALID);
697 if (is_lun_enabled(isp, bus, lun)) {
698 return (CAM_LUN_ALRDY_ENA);
700 new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
702 return (CAM_RESRC_UNAVAIL);
705 status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
706 xpt_path_target_id(path), xpt_path_lun_id(path));
707 if (status != CAM_REQ_CMP) {
713 SLIST_INIT(&new->atios);
714 SLIST_INIT(&new->inots);
/* Insert at the head of the hash bucket for (bus, lun). */
717 hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
718 tptr = isp->isp_osinfo.lun_hash[hfx];
720 isp->isp_osinfo.lun_hash[hfx] = new;
727 return (CAM_REQ_CMP);
/*
 * destroy_lun_state - unlink a tstate from its hash chain (head and
 * interior cases) and free it.  Refcount checks appear to live in the
 * lines missing from this copy.
 */
731 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
736 hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
740 pw = isp->isp_osinfo.lun_hash[hfx];
743 } else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
744 isp->isp_osinfo.lun_hash[hfx] = pw->next;
749 if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
760 free(tptr, M_DEVBUF);
764 * we enter with our locks held.
/*
 * isp_en_lun - XPT_EN_LUN handler: enable or disable a lun (or the
 * target/lun wildcard) for target mode.  Validates target/lun against
 * the adapter's initiator id and firmware capabilities, toggles
 * bus-level target mode via ISPCTL_TOGGLE_TMODE when needed, creates or
 * destroys lun state, and drives the firmware ENABLE/MODIFY LUN
 * commands under the per-bus rendezvous semaphore, waiting on the cv
 * for each response.  Completion status is set in ccb->ccb_h.status.
 * NOTE(review): many branch/return lines are missing from this copy;
 * the error unwinding here cannot be fully audited.
 */
767 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
769 const char lfmt[] = "Lun now %sabled for target mode on channel %d";
770 struct ccb_en_lun *cel = &ccb->cel;
773 int bus, cmd, av, wildcard;
778 bus = XS_CHANNEL(ccb) & 0x1;
779 tgt = ccb->ccb_h.target_id;
780 lun = ccb->ccb_h.target_lun;
783 * Do some sanity checking first.
786 if ((lun != CAM_LUN_WILDCARD) &&
787 (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
788 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Parallel SCSI: target must be ourselves (the initiator id). */
793 sdparam *sdp = isp->isp_param;
795 if (tgt != CAM_TARGET_WILDCARD &&
796 tgt != sdp->isp_initiator_id) {
797 ccb->ccb_h.status = CAM_TID_INVALID;
/* Fibre channel: target must be our loop id. */
801 if (tgt != CAM_TARGET_WILDCARD &&
802 tgt != FCPARAM(isp)->isp_iid) {
803 ccb->ccb_h.status = CAM_TID_INVALID;
807 * This is as a good a place as any to check f/w capabilities.
809 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
810 isp_prt(isp, ISP_LOGERR,
811 "firmware does not support target mode");
812 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
816 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
817 * XXX: dorks with our already fragile enable/disable code.
819 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
820 isp_prt(isp, ISP_LOGERR,
821 "firmware not SCCLUN capable");
/* Wildcard target requires wildcard lun as well. */
825 if (tgt == CAM_TARGET_WILDCARD) {
826 if (lun == CAM_LUN_WILDCARD) {
829 ccb->ccb_h.status = CAM_LUN_INVALID;
837 * Next check to see whether this is a target/lun wildcard action.
839 * If so, we know that we can accept commands for luns that haven't
840 * been enabled yet and send them upstream. Otherwise, we have to
841 * handle them locally (if we see them at all).
845 tptr = &isp->isp_osinfo.tsdflt[bus];
847 if (isp->isp_osinfo.tmflags[bus] &
848 TM_WILDCARD_ENABLED) {
849 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
853 xpt_create_path(&tptr->owner, NULL,
854 xpt_path_path_id(ccb->ccb_h.path),
855 xpt_path_target_id(ccb->ccb_h.path),
856 xpt_path_lun_id(ccb->ccb_h.path));
857 if (ccb->ccb_h.status != CAM_REQ_CMP) {
860 SLIST_INIT(&tptr->atios);
861 SLIST_INIT(&tptr->inots);
862 isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
864 if ((isp->isp_osinfo.tmflags[bus] &
865 TM_WILDCARD_ENABLED) == 0) {
866 ccb->ccb_h.status = CAM_REQ_CMP;
870 ccb->ccb_h.status = CAM_SCSI_BUSY;
873 xpt_free_path(tptr->owner);
874 isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
879 * Now check to see whether this bus needs to be
880 * enabled/disabled with respect to target mode.
883 if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
884 av |= ENABLE_TARGET_FLAG;
885 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
887 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
889 isp->isp_osinfo.tmflags[bus] &=
890 ~TM_WILDCARD_ENABLED;
891 xpt_free_path(tptr->owner);
895 isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
896 isp_prt(isp, ISP_LOGINFO,
897 "Target Mode enabled on channel %d", bus);
898 } else if (cel->enable == 0 &&
899 (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
900 if (are_any_luns_enabled(isp, bus)) {
901 ccb->ccb_h.status = CAM_SCSI_BUSY;
904 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
906 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
909 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
910 isp_prt(isp, ISP_LOGINFO,
911 "Target Mode disabled on channel %d", bus);
915 ccb->ccb_h.status = CAM_REQ_CMP;
/* Non-wildcard: create or look up the lun's state entry. */
921 create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
922 if (ccb->ccb_h.status != CAM_REQ_CMP) {
926 tptr = get_lun_statep(isp, bus, lun);
928 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Serialize firmware lun commands via the per-bus semaphore. */
933 if (isp_psema_sig_rqe(isp, bus)) {
934 rls_lun_statep(isp, tptr);
936 destroy_lun_state(isp, tptr);
937 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Enable path: issue ENABLE_LUN (or MODIFY_LUN for SCC f/w, lun!=0). */
942 u_int32_t seq = isp->isp_osinfo.rollinfo++;
943 int c, n, ulun = lun;
945 cmd = RQSTYPE_ENABLE_LUN;
948 if (IS_FC(isp) && lun != 0) {
949 cmd = RQSTYPE_MODIFY_LUN;
952 * For SCC firmware, we only deal with setting
953 * (enabling or modifying) lun 0.
958 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
959 xpt_print_path(ccb->ccb_h.path);
960 isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
963 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
964 xpt_print_path(ccb->ccb_h.path);
965 isp_prt(isp, ISP_LOGERR,
966 "wait for ENABLE/MODIFY LUN timed out");
969 rstat = isp->isp_osinfo.rstatus[bus];
970 if (rstat != LUN_OK) {
971 xpt_print_path(ccb->ccb_h.path);
972 isp_prt(isp, ISP_LOGERR,
973 "ENABLE/MODIFY LUN returned 0x%x", rstat);
/* Disable path: MODIFY_LUN (negative cmd encodes disable). */
977 int c, n, ulun = lun;
981 seq = isp->isp_osinfo.rollinfo++;
982 cmd = -RQSTYPE_MODIFY_LUN;
986 if (IS_FC(isp) && lun != 0) {
989 * For SCC firmware, we only deal with setting
990 * (enabling or modifying) lun 0.
994 if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
995 xpt_print_path(ccb->ccb_h.path);
996 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
999 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1000 xpt_print_path(ccb->ccb_h.path);
1001 isp_prt(isp, ISP_LOGERR,
1002 "wait for MODIFY LUN timed out");
1005 rstat = isp->isp_osinfo.rstatus[bus];
1006 if (rstat != LUN_OK) {
1007 xpt_print_path(ccb->ccb_h.path);
1008 isp_prt(isp, ISP_LOGERR,
1009 "MODIFY LUN returned 0x%x", rstat);
/* SCC FC with lun != 0 additionally requires a DISABLE on lun 0. */
1012 if (IS_FC(isp) && lun) {
1016 seq = isp->isp_osinfo.rollinfo++;
1019 cmd = -RQSTYPE_ENABLE_LUN;
1020 if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
1021 xpt_print_path(ccb->ccb_h.path);
1022 isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
1025 if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
1026 xpt_print_path(ccb->ccb_h.path);
1027 isp_prt(isp, ISP_LOGERR,
1028 "wait for DISABLE LUN timed out");
1031 rstat = isp->isp_osinfo.rstatus[bus];
1032 if (rstat != LUN_OK) {
1033 xpt_print_path(ccb->ccb_h.path);
1034 isp_prt(isp, ISP_LOGWARN,
1035 "DISABLE LUN returned 0x%x", rstat);
1038 if (are_any_luns_enabled(isp, bus) == 0) {
1039 av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1041 isp_prt(isp, ISP_LOGWARN,
1042 "disable target mode on channel %d failed",
1046 isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1047 xpt_print_path(ccb->ccb_h.path);
1048 isp_prt(isp, ISP_LOGINFO,
1049 "Target Mode disabled on channel %d", bus);
/* Release the rendezvous semaphore and report the outcome. */
1054 isp_vsema_rqe(isp, bus);
1056 if (rstat != LUN_OK) {
1057 xpt_print_path(ccb->ccb_h.path);
1058 isp_prt(isp, ISP_LOGWARN,
1059 "lun %sable failed", (cel->enable) ? "en" : "dis");
1060 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1061 rls_lun_statep(isp, tptr);
1063 destroy_lun_state(isp, tptr);
1065 xpt_print_path(ccb->ccb_h.path);
1066 isp_prt(isp, ISP_LOGINFO, lfmt,
1067 (cel->enable) ? "en" : "dis", bus);
1068 rls_lun_statep(isp, tptr);
1069 if (cel->enable == 0) {
1070 destroy_lun_state(isp, tptr);
1072 ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * isp_abort_tgt_ccb - abort a queued target-mode ATIO or INOT CCB:
 * validate the target id against our own initiator/loop id, look up the
 * lun state, pick the ATIO or INOT list based on the aborted CCB's
 * func_code, unlink the CCB from that SLIST (head or interior), and
 * complete it with CAM_REQ_ABORTED.  Returns a cam_status.
 */
1077 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1080 struct ccb_hdr_slist *lp;
1081 struct ccb_hdr *curelm;
1083 union ccb *accb = ccb->cab.abort_ccb;
1085 if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1086 if (IS_FC(isp) && (accb->ccb_h.target_id !=
1087 ((fcparam *) isp->isp_param)->isp_loopid)) {
1088 return (CAM_PATH_INVALID);
1089 } else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1090 ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1091 return (CAM_PATH_INVALID);
1094 tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1096 return (CAM_PATH_INVALID);
1098 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1100 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1103 rls_lun_statep(isp, tptr);
1104 return (CAM_UA_ABORT);
/* Hand-rolled SLIST removal: special-case the head, then walk. */
1106 curelm = SLIST_FIRST(lp);
1108 if (curelm == &accb->ccb_h) {
1110 SLIST_REMOVE_HEAD(lp, sim_links.sle);
1112 while(curelm != NULL) {
1113 struct ccb_hdr *nextelm;
1115 nextelm = SLIST_NEXT(curelm, sim_links.sle);
1116 if (nextelm == &accb->ccb_h) {
1118 SLIST_NEXT(curelm, sim_links.sle) =
1119 SLIST_NEXT(nextelm, sim_links.sle);
1125 rls_lun_statep(isp, tptr);
1127 accb->ccb_h.status = CAM_REQ_ABORTED;
1128 return (CAM_REQ_CMP);
1130 return(CAM_PATH_INVALID);
/*
 * isp_target_start_ctio - build and queue a CTIO (parallel SCSI) or
 * CTIO2 (fibre channel) request for a target-mode data/status phase.
 * Reserves a request-queue slot, fills the CTIO in a local staging
 * buffer, registers the CCB under a firmware handle, then runs the
 * platform DMA setup which also copies the entry onto the queue.
 * Returns CAM_REQ_INPROG on success or an error cam_status.
 */
1134 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1137 struct ccb_scsiio *cso = &ccb->csio;
1138 u_int16_t *hp, save_handle;
1139 u_int16_t nxti, optr;
1140 u_int8_t local[QENTRY_LEN];
1143 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1144 xpt_print_path(ccb->ccb_h.path);
1145 printf("Request Queue Overflow in isp_target_start_ctio\n");
1146 return (CAM_RESRC_UNAVAIL);
1148 bzero(local, QENTRY_LEN);
1151 * We're either moving data or completing a command here.
/* FC path: CTIO2. */
1155 atio_private_data_t *atp;
1156 ct2_entry_t *cto = (ct2_entry_t *) local;
1158 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1159 cto->ct_header.rqs_entry_count = 1;
1160 cto->ct_iid = cso->init_id;
1161 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1162 cto->ct_lun = ccb->ccb_h.target_lun;
1165 atp = isp_get_atpd(isp, cso->tag_id);
1167 isp_prt(isp, ISP_LOGERR,
1168 "cannot find private data adjunct for tag %x",
1173 cto->ct_rxid = cso->tag_id;
/* Mode 1 (status only, no data) vs. Mode 0 (data movement). */
1174 if (cso->dxfer_len == 0) {
1175 cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1176 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1177 cto->ct_flags |= CT2_SENDSTATUS;
1178 cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1180 atp->orig_datalen - atp->bytes_xfered;
1181 if (cto->ct_resid < 0) {
1182 cto->rsp.m1.ct_scsi_status |=
1184 } else if (cto->ct_resid > 0) {
1185 cto->rsp.m1.ct_scsi_status |=
1189 if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1190 int m = min(cso->sense_len, MAXRESPLEN);
1191 bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1192 cto->rsp.m1.ct_senselen = m;
1193 cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1196 cto->ct_flags |= CT2_FLAG_MODE0;
1197 if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1198 cto->ct_flags |= CT2_DATA_IN;
1200 cto->ct_flags |= CT2_DATA_OUT;
/* Relative offset continues from bytes already transferred. */
1202 cto->ct_reloff = atp->bytes_xfered;
1203 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1204 cto->ct_flags |= CT2_SENDSTATUS;
1205 cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1208 (atp->bytes_xfered + cso->dxfer_len);
1209 if (cto->ct_resid < 0) {
1210 cto->rsp.m0.ct_scsi_status |=
1212 } else if (cto->ct_resid > 0) {
1213 cto->rsp.m0.ct_scsi_status |=
1217 atp->last_xframt = cso->dxfer_len;
1220 * If we're sending data and status back together,
1221 * we can't also send back sense data as well.
1223 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1226 if (cto->ct_flags & CT2_SENDSTATUS) {
1227 isp_prt(isp, ISP_LOGTDEBUG0,
1228 "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1229 cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1230 cso->dxfer_len, cto->ct_resid);
1231 cto->ct_flags |= CT2_CCINCR;
1232 atp->state = ATPD_STATE_LAST_CTIO;
1234 atp->state = ATPD_STATE_CTIO;
1235 cto->ct_timeout = 10;
1236 hp = &cto->ct_syshandle;
/* Parallel SCSI path: CTIO. */
1238 ct_entry_t *cto = (ct_entry_t *) local;
1240 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1241 cto->ct_header.rqs_entry_count = 1;
1242 cto->ct_iid = cso->init_id;
/* Bus number is encoded in bit 7 of the initiator id. */
1243 cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1244 cto->ct_tgt = ccb->ccb_h.target_id;
1245 cto->ct_lun = ccb->ccb_h.target_lun;
1246 cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1247 if (AT_HAS_TAG(cso->tag_id)) {
1248 cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1249 cto->ct_flags |= CT_TQAE;
1251 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1252 cto->ct_flags |= CT_NODISC;
1254 if (cso->dxfer_len == 0) {
1255 cto->ct_flags |= CT_NO_DATA;
1256 } else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1257 cto->ct_flags |= CT_DATA_IN;
1259 cto->ct_flags |= CT_DATA_OUT;
1261 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1262 cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1263 cto->ct_scsi_status = cso->scsi_status;
1264 cto->ct_resid = cso->resid;
1265 isp_prt(isp, ISP_LOGTDEBUG0,
1266 "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1267 cto->ct_fwhandle, cso->scsi_status, cso->resid,
1270 ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1271 cto->ct_timeout = 10;
1272 hp = &cto->ct_syshandle;
1275 if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1276 xpt_print_path(ccb->ccb_h.path);
1277 printf("No XFLIST pointers for isp_target_start_ctio\n");
1278 return (CAM_RESRC_UNAVAIL);
1283 * Call the dma setup routines for this entry (and any subsequent
1284 * CTIOs) if there's data to move, and then tell the f/w it's got
1285 * new things to play with. As with isp_start's usage of DMA setup,
1286 * any swizzling is done in the machine dependent layer. Because
1287 * of this, we put the request onto the queue area first in native
1293 switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1295 ISP_ADD_REQUEST(isp, nxti);
1296 return (CAM_REQ_INPROG);
/* DMA setup failed: undo the handle registration before returning. */
1299 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1300 isp_destroy_handle(isp, save_handle);
1301 return (CAM_RESRC_UNAVAIL);
1304 isp_destroy_handle(isp, save_handle);
1305 return (XS_ERR(ccb));
/*
 * isp_refire_putback_atio - timeout(9) trampoline that retries
 * isp_target_putback_atio after a request-queue-full condition.
 */
1310 isp_refire_putback_atio(void *arg)
1313 isp_target_putback_atio(arg);
/*
 * isp_target_putback_atio - return a consumed ATIO to the firmware so
 * the lun can accept another command: build an ATIO/ATIO2 entry with
 * status CT_OK from the completed CCB's identifiers, queue it, then
 * complete the CTIO back to CAM.  If no queue slot is available,
 * reschedule itself via timeout().
 */
1318 isp_target_putback_atio(union ccb *ccb)
1320 struct ispsoftc *isp;
1321 struct ccb_scsiio *cso;
1322 u_int16_t nxti, optr;
1327 if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1328 (void) timeout(isp_refire_putback_atio, ccb, 10);
1329 isp_prt(isp, ISP_LOGWARN,
1330 "isp_target_putback_atio: Request Queue Overflow");
1333 bzero(qe, QENTRY_LEN);
/* FC: ATIO2, lun field depends on SCCLUN firmware capability. */
1336 at2_entry_t local, *at = &local;
1337 MEMZERO(at, sizeof (at2_entry_t));
1338 at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1339 at->at_header.rqs_entry_count = 1;
1340 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1341 at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1343 at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1345 at->at_status = CT_OK;
1346 at->at_rxid = cso->tag_id;
1347 at->at_iid = cso->ccb_h.target_id;
1348 isp_put_atio2(isp, at, qe);
/* Parallel SCSI: ATIO, bus encoded in bit 7 of the initiator id. */
1350 at_entry_t local, *at = &local;
1351 MEMZERO(at, sizeof (at_entry_t));
1352 at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1353 at->at_header.rqs_entry_count = 1;
1354 at->at_iid = cso->init_id;
1355 at->at_iid |= XS_CHANNEL(ccb) << 7;
1356 at->at_tgt = cso->ccb_h.target_id;
1357 at->at_lun = cso->ccb_h.target_lun;
1358 at->at_status = CT_OK;
1359 at->at_tag_val = AT_GET_TAG(cso->tag_id);
1360 at->at_handle = AT_GET_HANDLE(cso->tag_id);
1361 isp_put_atio(isp, at, qe);
1363 ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1364 ISP_ADD_REQUEST(isp, nxti);
1365 isp_complete_ctio(ccb);
/*
 * isp_complete_ctio - finalize a CTIO CCB: promote in-progress status
 * to CAM_REQ_CMP and clear the SIM-queued flag (the xpt_done call
 * appears to be in a line missing from this copy).
 */
1369 isp_complete_ctio(union ccb *ccb)
1371 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1372 ccb->ccb_h.status |= CAM_REQ_CMP;
1374 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1379 * Handle ATIO stuff that the generic code can't.
1380 * This means handling CDBs.
/*
 * isp_handle_platform_atio - process a parallel-SCSI ATIO from the
 * firmware: validate the firmware status, find the lun state (falling
 * back to the wildcard lun), dequeue a waiting XPT_ACCEPT_TARGET_IO
 * CCB, copy CDB/sense/tag info into it, and complete it to CAM.  If no
 * accept CCB is queued, respond BUSY/QUEUE FULL to the initiator.
 */
1384 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1387 int status, bus, iswildcard;
1388 struct ccb_accept_tio *atiop;
1391 * The firmware status (except for the QLTM_SVALID bit)
1392 * indicates why this ATIO was sent to us.
1394 * If QLTM_SVALID is set, the firware has recommended Sense Data.
1396 * If the DISCONNECTS DISABLED bit is set in the flags field,
1397 * we're still connected on the SCSI bus.
1399 status = aep->at_status;
1400 if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1402 * Bus Phase Sequence error. We should have sense data
1403 * suggested by the f/w. I'm not sure quite yet what
1404 * to do about this for CAM.
1406 isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1407 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1410 if ((status & ~QLTM_SVALID) != AT_CDB) {
1411 isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1413 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1417 bus = GET_BUS_VAL(aep->at_iid);
1418 tptr = get_lun_statep(isp, bus, aep->at_lun);
1420 tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1428 * Because we can't autofeed sense data back with
1429 * a command for parallel SCSI, we can't give back
1430 * a CHECK CONDITION. We'll give back a BUSY status
1431 * instead. This works out okay because the only
1432 * time we should, in fact, get this, is in the
1433 * case that somebody configured us without the
1434 * blackhole driver, so they get what they deserve.
1436 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1440 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1441 if (atiop == NULL) {
1443 * Because we can't autofeed sense data back with
1444 * a command for parallel SCSI, we can't give back
1445 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1446 * instead. This works out okay because the only time we
1447 * should, in fact, get this, is in the case that we've
1450 xpt_print_path(tptr->owner);
1451 isp_prt(isp, ISP_LOGWARN,
1452 "no ATIOS for lun %d from initiator %d on channel %d",
1453 aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
/* Tagged commands get QUEUE FULL, untagged get BUSY. */
1454 if (aep->at_flags & AT_TQAE)
1455 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1457 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1458 rls_lun_statep(isp, tptr);
1461 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1463 atiop->ccb_h.target_id = aep->at_tgt;
1464 atiop->ccb_h.target_lun = aep->at_lun;
1466 if (aep->at_flags & AT_NODISC) {
1467 atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1469 atiop->ccb_h.flags = 0;
/* Copy firmware-suggested sense data, if flagged valid. */
1472 if (status & QLTM_SVALID) {
1473 size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1474 atiop->sense_len = amt;
1475 MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1477 atiop->sense_len = 0;
1480 atiop->init_id = GET_IID_VAL(aep->at_iid);
1481 atiop->cdb_len = aep->at_cdblen;
1482 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1483 atiop->ccb_h.status = CAM_CDB_RECVD;
1485 * Construct a tag 'id' based upon tag value (which may be 0..255)
1486 * and the handle (which we have to preserve).
1488 AT_MAKE_TAGID(atiop->tag_id, aep);
1489 if (aep->at_flags & AT_TQAE) {
1490 atiop->tag_action = aep->at_tag_type;
1491 atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1493 xpt_done((union ccb*)atiop);
1494 isp_prt(isp, ISP_LOGTDEBUG0,
1495 "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1496 aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1497 GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1498 aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1499 "nondisc" : "disconnecting");
1500 rls_lun_statep(isp, tptr);
/*
 * isp_handle_platform_atio2 --
 *	Accept an incoming Fibre Channel ATIO (type 2) entry from the
 *	firmware and hand it to CAM as a completed ACCEPT_TARGET_IO CCB.
 *	Validates the firmware status, resolves the lun state (falling
 *	back to the wildcard lun), pairs the request with a private-data
 *	adjunct (atp) that tracks data-transfer progress, copies CDB and
 *	tag information into a queued ATIO CCB and completes it via
 *	xpt_done().
 *
 * NOTE(review): this listing is gapped -- source lines are missing
 * between the numbered statements, so braces, 'else' arms and return
 * paths are not all visible here.
 */
1505 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1509 struct ccb_accept_tio *atiop;
1510 atio_private_data_t *atp;
1513 * The firmware status (except for the QLTM_SVALID bit)
1514 * indicates why this ATIO was sent to us.
1516 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
/* Reject anything that is not a plain CDB-carrying ATIO. */
1518 if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1519 isp_prt(isp, ISP_LOGWARN,
1520 "bogus atio (0x%x) leaked to platform", aep->at_status);
1521 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* SCC-lun capable firmware reports the lun in a different field. */
1525 if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1526 lun = aep->at_scclun;
1530 tptr = get_lun_statep(isp, 0, lun);
1532 isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1533 tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1538 * What we'd like to know is whether or not we have a listener
1539 * upstream that really hasn't configured yet. If we do, then
1540 * we can give a more sensible reply here. If not, then we can
1541 * reject this out of hand.
1543 * Choices for what to send were
1545 * Not Ready, Unit Not Self-Configured Yet
1548 * for the former and
1550 * Illegal Request, Logical Unit Not Supported
1555 * We used to decide whether there was at least one listener
1556 * based upon whether the black hole driver was configured.
1557 * However, recent config(8) changes have made this hard to do
1561 isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
/* Need both a free ATIO CCB and a free adjunct to proceed. */
1565 atp = isp_get_atpd(isp, 0);
1566 atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1567 if (atiop == NULL || atp == NULL) {
1569 * Because we can't autofeed sense data back with
1570 * a command for parallel SCSI, we can't give back
1571 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1572 * instead. This works out okay because the only time we
1573 * should, in fact, get this, is in the case that we've
1576 xpt_print_path(tptr->owner);
1577 isp_prt(isp, ISP_LOGWARN,
1578 "no %s for lun %d from initiator %d",
1579 (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1580 ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1581 rls_lun_statep(isp, tptr);
1582 isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1585 atp->state = ATPD_STATE_ATIO;
1586 SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1588 isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1589 lun, tptr->atio_count);
/* Wildcard/default lun state: fill in our own loop id as target. */
1591 if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1592 atiop->ccb_h.target_id =
1593 ((fcparam *)isp->isp_param)->isp_loopid;
1594 atiop->ccb_h.target_lun = lun;
1597 * We don't get 'suggested' sense data as we do with SCSI cards.
1599 atiop->sense_len = 0;
1601 atiop->init_id = aep->at_iid;
1602 atiop->cdb_len = ATIO2_CDBLEN;
1603 MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1604 atiop->ccb_h.status = CAM_CDB_RECVD;
1605 atiop->tag_id = aep->at_rxid;
/* Map firmware task attribute onto a CAM tag action message. */
1606 switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1607 case ATIO2_TC_ATTR_SIMPLEQ:
1608 atiop->tag_action = MSG_SIMPLE_Q_TAG;
1610 case ATIO2_TC_ATTR_HEADOFQ:
1611 atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1613 case ATIO2_TC_ATTR_ORDERED:
1614 atiop->tag_action = MSG_ORDERED_Q_TAG;
1616 case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1617 case ATIO2_TC_ATTR_UNTAGGED:
1619 atiop->tag_action = 0;
1622 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
/* Initialize the adjunct that tracks this exchange's data transfer. */
1624 atp->tag = atiop->tag_id;
1626 atp->orig_datalen = aep->at_datalen;
1627 atp->last_xframt = 0;
1628 atp->bytes_xfered = 0;
1629 atp->state = ATPD_STATE_CAM;
1630 xpt_done((union ccb*)atiop);
1632 isp_prt(isp, ISP_LOGTDEBUG0,
1633 "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1634 aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1635 lun, aep->at_taskflags, aep->at_datalen);
1636 rls_lun_statep(isp, tptr);
/*
 * isp_handle_platform_ctio --
 *	Process a completed CTIO (parallel SCSI) or CTIO2 (Fibre Channel)
 *	continue-target-I/O entry.  Looks up the CCB by firmware handle,
 *	collects status/residual/sense, and -- when this was the final
 *	CTIO of the exchange -- replenishes the ATIO pool and completes
 *	the CCB back to CAM.
 *
 * NOTE(review): gapped listing; the branch selecting CTIO2 vs CTIO
 * handling and several closing braces/returns are not visible here.
 */
1641 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1644 int sentstatus, ok, notify_cam, resid = 0;
1648 * CTIO and CTIO2 are close enough....
/* The syshandle field is at the same offset for both entry types. */
1651 ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1652 KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1653 isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
/* --- CTIO2 (Fibre Channel) path --- */
1656 ct2_entry_t *ct = arg;
1657 atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1659 isp_prt(isp, ISP_LOGERR,
1660 "cannot find adjunct for %x after I/O",
1664 sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1665 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1666 if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1667 ccb->ccb_h.status |= CAM_SENT_SENSE;
/* Odd sequence number marks the last CTIO of a chained request. */
1669 notify_cam = ct->ct_header.rqs_seqno & 0x1;
1670 if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1671 resid = ct->ct_resid;
1672 atp->bytes_xfered += (atp->last_xframt - resid);
1673 atp->last_xframt = 0;
1675 if (sentstatus || !ok) {
1678 isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1679 "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1680 ct->ct_rxid, ct->ct_status, ct->ct_flags,
1681 (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1682 resid, sentstatus? "FIN" : "MID");
1685 /* XXX: should really come after isp_complete_ctio */
1686 atp->state = ATPD_STATE_PDON;
/* --- CTIO (parallel SCSI) path --- */
1688 ct_entry_t *ct = arg;
1689 sentstatus = ct->ct_flags & CT_SENDSTATUS;
1690 ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1692 * We *ought* to be able to get back to the original ATIO
1693 * here, but for some reason this gets lost. It's just as
1694 * well because it's squirrelled away as part of periph
1697 * We can live without it as long as we continue to use
1698 * the auto-replenish feature for CTIOs.
1700 notify_cam = ct->ct_header.rqs_seqno & 0x1;
/* Firmware-suggested sense data trails the entry at a fixed offset. */
1701 if (ct->ct_status & QLTM_SVALID) {
1702 char *sp = (char *)ct;
1703 sp += CTIO_SENSE_OFFSET;
1704 ccb->csio.sense_len =
1705 min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1706 MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1707 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1709 if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1710 resid = ct->ct_resid;
1712 isp_prt(isp, ISP_LOGTDEBUG0,
1713 "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1714 ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1715 ct->ct_status, ct->ct_flags, resid,
1716 sentstatus? "FIN" : "MID");
1717 tval = ct->ct_fwhandle;
/* Accumulate residual across multiple CTIOs of one exchange. */
1719 ccb->csio.resid += resid;
1722 * We're here either because intermediate data transfers are done
1723 * and/or the final status CTIO (which may have joined with a
1724 * Data Transfer) is done.
1726 * In any case, for this platform, the upper layers figure out
1727 * what to do next, so all we do here is collect status and
1728 * pass information along. Any DMA handles have already been
1731 if (notify_cam == 0) {
1732 isp_prt(isp, ISP_LOGTDEBUG0, " INTER CTIO[0x%x] done", tval);
1736 isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1737 (sentstatus)? " FINAL " : "MIDTERM ", tval);
/* Replenish the firmware's ATIO resource before completing to CAM. */
1740 isp_target_putback_atio(ccb);
1742 isp_complete_ctio(ccb);
/*
 * isp_handle_platform_notify_scsi --
 *	Handle an immediate-notify entry from a parallel SCSI adapter.
 *	Currently unimplemented: always reports "not handled" (0) so the
 *	caller performs default processing (see the XXXX marker).
 */
1749 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1751 return (0);	/* XXXX */
/*
 * isp_handle_platform_notify_fc --
 *	Handle an immediate-notify entry from a Fibre Channel adapter.
 *	Logs port logout/change and global-logout events; for an abort
 *	task, looks up the exchange adjunct and, when an IMMED_NOTIFY
 *	CCB is queued for the lun, fills it with an ABORT TAG message
 *	and completes it to CAM.
 *
 * NOTE(review): gapped listing; the switch cases' break statements and
 * the final return are not visible here.
 */
1755 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1758 switch (inp->in_status) {
1759 case IN_PORT_LOGOUT:
1760 isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1763 case IN_PORT_CHANGED:
1764 isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1767 case IN_GLOBAL_LOGO:
1768 isp_prt(isp, ISP_LOGINFO, "all ports logged out");
/* Abort-task handling: in_seqid identifies the exchange (RX_ID). */
1772 atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1773 struct ccb_immed_notify *inot = NULL;
1776 tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1778 inot = (struct ccb_immed_notify *)
1779 SLIST_FIRST(&tptr->inots);
1781 SLIST_REMOVE_HEAD(&tptr->inots,
1785 isp_prt(isp, ISP_LOGWARN,
1786 "abort task RX_ID %x IID %d state %d",
1787 inp->in_seqid, inp->in_iid, atp->state);
1789 isp_prt(isp, ISP_LOGWARN,
1790 "abort task RX_ID %x from iid %d, state unknown",
1791 inp->in_seqid, inp->in_iid);
/* Deliver the abort as a CAM message-received notification. */
1794 inot->initiator_id = inp->in_iid;
1795 inot->sense_len = 0;
1796 inot->message_args[0] = MSG_ABORT_TAG;
1797 inot->message_args[1] = inp->in_seqid & 0xff;
1798 inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1799 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1800 xpt_done((union ccb *)inot);
/*
 * isp_cam_async --
 *	CAM asynchronous-event callback.  On AC_LOST_DEVICE (parallel
 *	SCSI only, per the visible sdparam usage) the target's goal
 *	negotiation flags are temporarily reset toward safe/NVRAM
 *	defaults, pushed to the chip via ISPCTL_UPDATE_PARAMS, and then
 *	the previous goal flags are restored.  Other codes are logged.
 *
 * NOTE(review): gapped listing; the enclosing switch statement, lock
 * handling, and #else/#endif lines are not all visible here.
 */
1812 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1814 struct cam_sim *sim;
1815 struct ispsoftc *isp;
1817 sim = (struct cam_sim *)cbarg;
1818 isp = (struct ispsoftc *) cam_sim_softc(sim);
1820 case AC_LOST_DEVICE:
1822 u_int16_t oflags, nflags;
1823 sdparam *sdp = isp->isp_param;
1826 tgt = xpt_path_target_id(path);
/* Per-bus parameter block: index by the sim's bus number. */
1828 sdp += cam_sim_bus(sim);
1830 nflags = sdp->isp_devparam[tgt].nvrm_flags;
1831 #ifndef ISP_TARGET_MODE
1832 nflags &= DPARM_SAFE_DFLT;
1833 if (isp->isp_loaded_fw) {
1834 nflags |= DPARM_NARROW | DPARM_ASYNC;
1837 nflags = DPARM_DEFAULT;
/* Swap in the reset flags, update the chip, then restore goals. */
1839 oflags = sdp->isp_devparam[tgt].goal_flags;
1840 sdp->isp_devparam[tgt].goal_flags = nflags;
1841 sdp->isp_devparam[tgt].dev_update = 1;
1842 isp->isp_update |= (1 << cam_sim_bus(sim));
1843 (void) isp_control(isp,
1844 ISPCTL_UPDATE_PARAMS, NULL);
1845 sdp->isp_devparam[tgt].goal_flags = oflags;
1851 isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
/*
 * isp_poll --
 *	CAM SIM poll entry point: check the interrupt status registers
 *	once and, if an interrupt condition is pending, run the normal
 *	interrupt service routine synchronously.
 *
 * NOTE(review): gapped listing; lock acquire/release around the read
 * is not visible here.
 */
1857 isp_poll(struct cam_sim *sim)
1859 struct ispsoftc *isp = cam_sim_softc(sim);
1860 u_int16_t isr, sema, mbox;
1863 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1864 isp_intr(isp, isr, sema, mbox);
/*
 * isp_watchdog --
 *	Per-command timeout handler.  With interrupts logically masked
 *	(intsok cleared), re-check whether the command already finished
 *	or an interrupt is pending; if the command is on borrowed time
 *	(grace period expired) abort it, free its DMA and handle, and
 *	fail it with CAM_CMD_TIMEOUT.  Otherwise re-arm the watchdog and
 *	push a SYNC_ALL marker to flush the firmware's request queue.
 *
 * NOTE(review): gapped listing; several braces, 'else' arms and the
 * grace-period arming are not visible here.
 */
1871 isp_watchdog(void *arg)
1874 struct ispsoftc *isp = XS_ISP(xs);
1879 * We've decided this command is dead. Make sure we're not trying
1880 * to kill a command that's already dead by getting its handle
1881 * and seeing whether it's still alive.
1884 iok = isp->isp_osinfo.intsok;
1885 isp->isp_osinfo.intsok = 0;
1886 handle = isp_find_handle(isp, xs);
1888 u_int16_t isr, sema, mbox;
1890 if (XS_CMD_DONE_P(xs)) {
1891 isp_prt(isp, ISP_LOGDEBUG1,
1892 "watchdog found done cmd (handle 0x%x)", handle);
1897 if (XS_CMD_WDOG_P(xs)) {
1898 isp_prt(isp, ISP_LOGDEBUG2,
1899 "recursive watchdog (handle 0x%x)", handle);
/* Poll once: a pending interrupt may complete the command now. */
1905 if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1906 isp_intr(isp, isr, sema, mbox);
1908 if (XS_CMD_DONE_P(xs)) {
1909 isp_prt(isp, ISP_LOGDEBUG2,
1910 "watchdog cleanup for handle 0x%x", handle);
1911 xpt_done((union ccb *) xs);
1912 } else if (XS_CMD_GRACE_P(xs)) {
1914 * Make sure the command is *really* dead before we
1915 * release the handle (and DMA resources) for reuse.
1917 (void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1920 * After this point, the command is really dead.
1922 if (XS_XFRLEN(xs)) {
1923 ISP_DMAFREE(isp, xs, handle);
1925 isp_destroy_handle(isp, handle);
1926 xpt_print_path(xs->ccb_h.path);
1927 isp_prt(isp, ISP_LOGWARN,
1928 "watchdog timeout for handle 0x%x", handle);
1929 XS_SETERR(xs, CAM_CMD_TIMEOUT);
1933 u_int16_t nxti, optr;
1934 ispreq_t local, *mp= &local, *qe;
/* Not yet in grace: re-arm for one more second and nudge firmware. */
1937 xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1938 if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1943 MEMZERO((void *) mp, sizeof (*mp));
1944 mp->req_header.rqs_entry_count = 1;
1945 mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1946 mp->req_modifier = SYNC_ALL;
1947 mp->req_target = XS_CHANNEL(xs) << 7;
1948 isp_put_request(isp, mp, qe);
1949 ISP_ADD_REQUEST(isp, nxti);
1952 isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
/* Restore the interrupts-okay flag saved on entry. */
1954 isp->isp_osinfo.intsok = iok;
/*
 * isp_kthread --
 *	Fibre Channel state-recovery kernel thread.  Loops trying to
 *	bring the FC link to a good running state (isp_fc_runstate);
 *	while firmware/loop state is not ready it sleeps and retries.
 *	Once done (or once it gives up on a never-seen loop), it clears
 *	the LOOPDOWN SIMQ freeze and releases the SIM queue, then waits
 *	on its condition variable until signalled again.
 *
 * NOTE(review): gapped listing; loop braces, the freeze_loopdown call
 * and the mutex/compat #ifdef structure are not all visible here.
 */
1959 isp_kthread(void *arg)
1961 struct ispsoftc *isp = arg;
1964 mtx_lock(&isp->isp_lock);
1969 * The first loop is for our usage where we have yet to have
1970 * gotten good fibre channel state.
1975 isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1976 while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1977 isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1978 if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1979 FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1980 if (FCPARAM(isp)->loop_seen_once == 0 ||
1981 isp->isp_osinfo.ktmature == 0) {
/* Sleep a second between retries (mutex vs. legacy tsleep paths). */
1986 msleep(isp_kthread, &isp->isp_lock,
1987 PRIBIO, "isp_fcthrd", hz);
1989 (void) tsleep(isp_kthread, PRIBIO, "isp_fcthrd", hz);
1994 * Even if we didn't get good loop state we may be
1995 * unfreezing the SIMQ so that we can kill off
1996 * commands (if we've never seen loop before, for example).
1998 isp->isp_osinfo.ktmature = 1;
1999 wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
2000 isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
2001 if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
2002 isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
2003 ISPLOCK_2_CAMLOCK(isp);
2004 xpt_release_simq(isp->isp_sim, 1);
2005 CAMLOCK_2_ISPLOCK(isp);
2007 isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
/* Block until isp_action (CMD_RQLATER path) signals us again. */
2009 cv_wait(&isp->isp_osinfo.kthread_cv, &isp->isp_lock);
2011 (void) tsleep(&isp->isp_osinfo.kthread_cv, PRIBIO, "fc_cv", 0);
/*
 * isp_action --
 *	Main CAM SIM action entry point.  Dispatches on the CCB function
 *	code: starts SCSI I/O (with watchdog arming and RQLATER/EAGAIN
 *	requeue handling), manages target-mode resources, performs
 *	device/bus resets and aborts, gets/sets transfer negotiation
 *	settings (both pre- and post-CAM_NEW_TRAN_CODE forms), computes
 *	geometry, and answers path inquiries.
 *
 * NOTE(review): gapped listing -- braces, 'break;'s, #else/#endif
 * lines and several error branches are missing between the numbered
 * statements, so control flow must be read from the original file.
 */
2017 isp_action(struct cam_sim *sim, union ccb *ccb)
2019 int bus, tgt, error;
2020 struct ispsoftc *isp;
2021 struct ccb_trans_settings *cts;
2023 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2025 isp = (struct ispsoftc *)cam_sim_softc(sim);
2026 ccb->ccb_h.sim_priv.entries[0].field = 0;
2027 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
/* Bring the HBA to RUNSTATE lazily on the first XPT_SCSI_IO. */
2028 if (isp->isp_state != ISP_RUNSTATE &&
2029 ccb->ccb_h.func_code == XPT_SCSI_IO) {
2030 CAMLOCK_2_ISPLOCK(isp);
2032 if (isp->isp_state != ISP_INITSTATE) {
2035 * Lie. Say it was a selection timeout.
2037 ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2038 xpt_freeze_devq(ccb->ccb_h.path, 1);
2042 isp->isp_state = ISP_RUNSTATE;
2043 ISPLOCK_2_CAMLOCK(isp);
2045 isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2048 switch (ccb->ccb_h.func_code) {
2049 case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2051 * Do a couple of preliminary checks...
2053 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2054 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2055 ccb->ccb_h.status = CAM_REQ_INVALID;
2061 if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2062 ccb->ccb_h.status = CAM_PATH_INVALID;
2063 } else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2064 ccb->ccb_h.status = CAM_PATH_INVALID;
2066 if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2067 isp_prt(isp, ISP_LOGERR,
2068 "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2069 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2074 ((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2075 CAMLOCK_2_ISPLOCK(isp);
2076 error = isp_start((XS_T *) ccb);
/* Command queued: arm the per-command watchdog timeout. */
2079 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2080 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2081 u_int64_t ticks = (u_int64_t) hz;
2082 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2083 ticks = 60 * 1000 * ticks;
2085 ticks = ccb->ccb_h.timeout * hz;
/* Convert ms to ticks, round up, and pad by two seconds. */
2086 ticks = ((ticks + 999) / 1000) + hz + hz;
2087 if (ticks >= 0x80000000) {
2088 isp_prt(isp, ISP_LOGERR,
2089 "timeout overflow");
2092 ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2093 (caddr_t)ccb, (int)ticks);
2095 callout_handle_init(&ccb->ccb_h.timeout_ch);
2097 ISPLOCK_2_CAMLOCK(isp);
2101 * This can only happen for Fibre Channel
/* CMD_RQLATER: loop not up yet -- requeue or fail, wake kthread. */
2103 KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2104 if (FCPARAM(isp)->loop_seen_once == 0 &&
2105 isp->isp_osinfo.ktmature) {
2106 ISPLOCK_2_CAMLOCK(isp);
2107 XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2112 cv_signal(&isp->isp_osinfo.kthread_cv);
2114 wakeup(&isp->isp_osinfo.kthread_cv);
2116 isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2117 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2118 ISPLOCK_2_CAMLOCK(isp);
2122 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2123 ISPLOCK_2_CAMLOCK(isp);
2127 isp_done((struct ccb_scsiio *) ccb);
2128 ISPLOCK_2_CAMLOCK(isp);
2131 isp_prt(isp, ISP_LOGERR,
2132 "What's this? 0x%x at %d in file %s",
2133 error, __LINE__, __FILE__);
2134 XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2136 ISPLOCK_2_CAMLOCK(isp);
2140 #ifdef ISP_TARGET_MODE
2141 case XPT_EN_LUN:		/* Enable LUN as a target */
/* Run lun enable with interrupts logically masked (intsok). */
2144 CAMLOCK_2_ISPLOCK(isp);
2145 iok = isp->isp_osinfo.intsok;
2146 isp->isp_osinfo.intsok = 0;
2147 isp_en_lun(isp, ccb);
2148 isp->isp_osinfo.intsok = iok;
2149 ISPLOCK_2_CAMLOCK(isp);
2153 case XPT_NOTIFY_ACK:		/* recycle notify ack */
2154 case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2155 case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2158 get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2160 ccb->ccb_h.status = CAM_LUN_INVALID;
2164 ccb->ccb_h.sim_priv.entries[0].field = 0;
2165 ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2166 ccb->ccb_h.flags = 0;
2168 CAMLOCK_2_ISPLOCK(isp);
2169 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2171 * Note that the command itself may not be done-
2172 * it may not even have had the first CTIO sent.
2175 isp_prt(isp, ISP_LOGTDEBUG0,
2176 "Put FREE ATIO2, lun %d, count now %d",
2177 ccb->ccb_h.target_lun, tptr->atio_count);
2178 SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2180 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2181 SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2186 rls_lun_statep(isp, tptr);
2187 ccb->ccb_h.status = CAM_REQ_INPROG;
2188 ISPLOCK_2_CAMLOCK(isp);
2191 case XPT_CONT_TARGET_IO:
2193 CAMLOCK_2_ISPLOCK(isp);
2194 ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2195 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2196 isp_prt(isp, ISP_LOGWARN,
2197 "XPT_CONT_TARGET_IO: status 0x%x",
2199 XS_SETERR(ccb, CAM_REQUEUE_REQ);
2200 ISPLOCK_2_CAMLOCK(isp);
2203 ISPLOCK_2_CAMLOCK(isp);
2204 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2209 case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2211 bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2212 tgt = ccb->ccb_h.target_id;
2215 CAMLOCK_2_ISPLOCK(isp);
2216 error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2217 ISPLOCK_2_CAMLOCK(isp);
2219 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2221 ccb->ccb_h.status = CAM_REQ_CMP;
2225 case XPT_ABORT:			/* Abort the specified CCB */
2227 union ccb *accb = ccb->cab.abort_ccb;
2228 CAMLOCK_2_ISPLOCK(isp);
2229 switch (accb->ccb_h.func_code) {
2230 #ifdef ISP_TARGET_MODE
2231 case XPT_ACCEPT_TARGET_IO:
2232 case XPT_IMMED_NOTIFY:
2233 ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2235 case XPT_CONT_TARGET_IO:
2236 isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2237 ccb->ccb_h.status = CAM_UA_ABORT;
2241 error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2243 ccb->ccb_h.status = CAM_UA_ABORT;
2245 ccb->ccb_h.status = CAM_REQ_CMP;
2249 ccb->ccb_h.status = CAM_REQ_INVALID;
2252 ISPLOCK_2_CAMLOCK(isp);
2256 #ifdef CAM_NEW_TRAN_CODE
2257 #define IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2259 #define IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2261 case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2263 if (!IS_CURRENT_SETTINGS(cts)) {
2264 ccb->ccb_h.status = CAM_REQ_INVALID;
2268 tgt = cts->ccb_h.target_id;
2269 CAMLOCK_2_ISPLOCK(isp);
2271 #ifndef CAM_NEW_TRAN_CODE
2272 sdparam *sdp = isp->isp_param;
2275 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2279 * We always update (internally) from goal_flags
2280 * so any request to change settings just gets
2281 * vectored to that location.
2283 dptr = &sdp->isp_devparam[tgt].goal_flags;
2286 * Note that these operations affect the
2287 * goal flags (goal_flags)- not
2288 * the current state flags. Then we mark
2289 * things so that the next operation to
2290 * this HBA will cause the update to occur.
2292 if (cts->valid & CCB_TRANS_DISC_VALID) {
2293 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2294 *dptr |= DPARM_DISC;
2296 *dptr &= ~DPARM_DISC;
2299 if (cts->valid & CCB_TRANS_TQ_VALID) {
2300 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2301 *dptr |= DPARM_TQING;
2303 *dptr &= ~DPARM_TQING;
2306 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2307 switch (cts->bus_width) {
2308 case MSG_EXT_WDTR_BUS_16_BIT:
2309 *dptr |= DPARM_WIDE;
2312 *dptr &= ~DPARM_WIDE;
2316 * Any SYNC RATE of nonzero and SYNC_OFFSET
2317 * of nonzero will cause us to go to the
2318 * selected (from NVRAM) maximum value for
2319 * this device. At a later point, we'll
2320 * allow finer control.
2322 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2323 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2324 (cts->sync_offset > 0)) {
2325 *dptr |= DPARM_SYNC;
2327 *dptr &= ~DPARM_SYNC;
2329 *dptr |= DPARM_SAFE_DFLT;
/* CAM_NEW_TRAN_CODE variant of the same goal-flag edits. */
2331 struct ccb_trans_settings_scsi *scsi =
2332 &cts->proto_specific.scsi;
2333 struct ccb_trans_settings_spi *spi =
2334 &cts->xport_specific.spi;
2335 sdparam *sdp = isp->isp_param;
2338 bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2341 * We always update (internally) from goal_flags
2342 * so any request to change settings just gets
2343 * vectored to that location.
2345 dptr = &sdp->isp_devparam[tgt].goal_flags;
2347 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2348 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2349 *dptr |= DPARM_DISC;
2351 *dptr &= ~DPARM_DISC;
2354 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2355 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2356 *dptr |= DPARM_TQING;
2358 *dptr &= ~DPARM_TQING;
2361 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2362 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2363 *dptr |= DPARM_WIDE;
2365 *dptr &= ~DPARM_WIDE;
2371 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2372 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2373 (spi->sync_period && spi->sync_offset)) {
2374 *dptr |= DPARM_SYNC;
2376 * XXX: CHECK FOR LEGALITY
2378 sdp->isp_devparam[tgt].goal_period =
2380 sdp->isp_devparam[tgt].goal_offset =
2383 *dptr &= ~DPARM_SYNC;
2386 isp_prt(isp, ISP_LOGDEBUG0,
2387 "SET bus %d targ %d to flags %x off %x per %x",
2388 bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2389 sdp->isp_devparam[tgt].goal_offset,
2390 sdp->isp_devparam[tgt].goal_period);
2391 sdp->isp_devparam[tgt].dev_update = 1;
2392 isp->isp_update |= (1 << bus);
2394 ISPLOCK_2_CAMLOCK(isp);
2395 ccb->ccb_h.status = CAM_REQ_CMP;
2398 case XPT_GET_TRAN_SETTINGS:
2400 tgt = cts->ccb_h.target_id;
2401 CAMLOCK_2_ISPLOCK(isp);
2403 #ifndef CAM_NEW_TRAN_CODE
2405 * a lot of normal SCSI things don't make sense.
2407 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2408 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2410 * How do you measure the width of a high
2411 * speed serial bus? Well, in bytes.
2413 * Offset and period make no sense, though, so we set
2414 * (above) a 'base' transfer speed to be gigabit.
2416 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/* FC report under CAM_NEW_TRAN_CODE: speed plus port WWNs if known. */
2418 fcparam *fcp = isp->isp_param;
2419 struct ccb_trans_settings_fc *fc =
2420 &cts->xport_specific.fc;
2422 cts->protocol = PROTO_SCSI;
2423 cts->protocol_version = SCSI_REV_2;
2424 cts->transport = XPORT_FC;
2425 cts->transport_version = 0;
2427 fc->valid = CTS_FC_VALID_SPEED;
2428 if (fcp->isp_gbspeed == 2)
2429 fc->bitrate = 200000;
2431 fc->bitrate = 100000;
2432 if (tgt > 0 && tgt < MAX_FC_TARG) {
2433 struct lportdb *lp = &fcp->portdb[tgt];
2434 fc->wwnn = lp->node_wwn;
2435 fc->wwpn = lp->port_wwn;
2436 fc->port = lp->portid;
2437 fc->valid |= CTS_FC_VALID_WWNN |
2438 CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2442 #ifdef CAM_NEW_TRAN_CODE
2443 struct ccb_trans_settings_scsi *scsi =
2444 &cts->proto_specific.scsi;
2445 struct ccb_trans_settings_spi *spi =
2446 &cts->xport_specific.spi;
2448 sdparam *sdp = isp->isp_param;
2449 int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2450 u_int16_t dval, pval, oval;
/* Current settings: refresh from chip; otherwise report NVRAM. */
2454 if (IS_CURRENT_SETTINGS(cts)) {
2455 sdp->isp_devparam[tgt].dev_refresh = 1;
2456 isp->isp_update |= (1 << bus);
2457 (void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2459 dval = sdp->isp_devparam[tgt].actv_flags;
2460 oval = sdp->isp_devparam[tgt].actv_offset;
2461 pval = sdp->isp_devparam[tgt].actv_period;
2463 dval = sdp->isp_devparam[tgt].nvrm_flags;
2464 oval = sdp->isp_devparam[tgt].nvrm_offset;
2465 pval = sdp->isp_devparam[tgt].nvrm_period;
2468 #ifndef CAM_NEW_TRAN_CODE
2469 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2471 if (dval & DPARM_DISC) {
2472 cts->flags |= CCB_TRANS_DISC_ENB;
2474 if (dval & DPARM_TQING) {
2475 cts->flags |= CCB_TRANS_TAG_ENB;
2477 if (dval & DPARM_WIDE) {
2478 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2480 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2482 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2483 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2485 if ((dval & DPARM_SYNC) && oval != 0) {
2486 cts->sync_period = pval;
2487 cts->sync_offset = oval;
2489 CCB_TRANS_SYNC_RATE_VALID |
2490 CCB_TRANS_SYNC_OFFSET_VALID;
2493 cts->protocol = PROTO_SCSI;
2494 cts->protocol_version = SCSI_REV_2;
2495 cts->transport = XPORT_SPI;
2496 cts->transport_version = 2;
2498 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2499 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2500 if (dval & DPARM_DISC) {
2501 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2503 if (dval & DPARM_TQING) {
2504 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2506 if ((dval & DPARM_SYNC) && oval && pval) {
2507 spi->sync_offset = oval;
2508 spi->sync_period = pval;
2509 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2510 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2512 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2513 if (dval & DPARM_WIDE) {
2514 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2516 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2518 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2519 scsi->valid = CTS_SCSI_VALID_TQ;
2520 spi->valid |= CTS_SPI_VALID_DISC;
2525 isp_prt(isp, ISP_LOGDEBUG0,
2526 "GET %s bus %d targ %d to flags %x off %x per %x",
2527 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2528 bus, tgt, dval, oval, pval);
2530 ISPLOCK_2_CAMLOCK(isp);
2531 ccb->ccb_h.status = CAM_REQ_CMP;
2535 case XPT_CALC_GEOMETRY:
2537 struct ccb_calc_geometry *ccg;
2540 if (ccg->block_size == 0) {
2541 isp_prt(isp, ISP_LOGERR,
2542 "%d.%d XPT_CALC_GEOMETRY block size 0?",
2543 ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2544 ccb->ccb_h.status = CAM_REQ_INVALID;
2548 cam_calc_geometry(ccg, /*extended*/1);
2552 case XPT_RESET_BUS:		/* Reset the specified bus */
2553 bus = cam_sim_bus(sim);
2554 CAMLOCK_2_ISPLOCK(isp);
2555 error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2556 ISPLOCK_2_CAMLOCK(isp);
2558 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2560 if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2561 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2562 else if (isp->isp_path != NULL)
2563 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2564 ccb->ccb_h.status = CAM_REQ_CMP;
2569 case XPT_TERM_IO:		/* Terminate the I/O process */
2570 ccb->ccb_h.status = CAM_REQ_INVALID;
2574 case XPT_PATH_INQ:		/* Path routing inquiry */
2576 struct ccb_pathinq *cpi = &ccb->cpi;
2578 cpi->version_num = 1;
2579 #ifdef ISP_TARGET_MODE
2580 cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2582 cpi->target_sprt = 0;
2584 cpi->hba_eng_cnt = 0;
2585 cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2586 cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2587 cpi->bus_id = cam_sim_bus(sim);
2589 cpi->hba_misc = PIM_NOBUSRESET;
2591 * Because our loop ID can shift from time to time,
2592 * make our initiator ID out of range of our bus.
2594 cpi->initiator_id = cpi->max_target + 1;
2597 * Set base transfer capabilities for Fibre Channel.
2598 * Technically not correct because we don't know
2599 * what media we're running on top of- but we'll
2600 * look good if we always say 100MB/s.
2602 if (FCPARAM(isp)->isp_gbspeed == 2)
2603 cpi->base_transfer_speed = 200000;
2605 cpi->base_transfer_speed = 100000;
2606 cpi->hba_inquiry = PI_TAG_ABLE;
2607 #ifdef CAM_NEW_TRAN_CODE
2608 cpi->transport = XPORT_FC;
2609 cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
/* Parallel SCSI path inquiry. */
2612 sdparam *sdp = isp->isp_param;
2613 sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2614 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2616 cpi->initiator_id = sdp->isp_initiator_id;
2617 cpi->base_transfer_speed = 3300;
2618 #ifdef CAM_NEW_TRAN_CODE
2619 cpi->transport = XPORT_SPI;
2620 cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2623 #ifdef CAM_NEW_TRAN_CODE
2624 cpi->protocol = PROTO_SCSI;
2625 cpi->protocol_version = SCSI_REV_2;
2627 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2628 strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2629 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2630 cpi->unit_number = cam_sim_unit(sim);
2631 cpi->ccb_h.status = CAM_REQ_CMP;
2636 ccb->ccb_h.status = CAM_REQ_INVALID;
2642 #define ISPDDB (CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
/*
 * isp_done --
 *	Final completion path for an initiator-mode SCSI I/O.  Folds the
 *	SCSI status into the CAM status (CHECK CONDITION without valid
 *	autosense becomes CAM_AUTOSENSE_FAIL), freezes the device queue
 *	on any non-CAM_REQ_CMP completion, cancels the watchdog timeout
 *	(unless the watchdog itself is running), and hands the CCB back
 *	to CAM via xpt_done().
 *
 * NOTE(review): gapped listing; some braces and the surrounding
 * condition at line 2649 (the XS_SETERR default) are not fully visible.
 */
2644 isp_done(struct ccb_scsiio *sccb)
2646 struct ispsoftc *isp = XS_ISP(sccb);
2649 XS_SETERR(sccb, CAM_REQ_CMP);
2651 if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2652 (sccb->scsi_status != SCSI_STATUS_OK)) {
2653 sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2654 if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2655 (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2656 sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2658 sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2662 sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/* Freeze the device queue exactly once per failed command. */
2663 if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2664 if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2665 sccb->ccb_h.status |= CAM_DEV_QFRZN;
2666 xpt_freeze_devq(sccb->ccb_h.path, 1);
2667 isp_prt(isp, ISP_LOGDEBUG0,
2668 "freeze devq %d.%d cam sts %x scsi sts %x",
2669 sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2670 sccb->ccb_h.status, sccb->scsi_status);
2674 if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2675 (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2676 xpt_print_path(sccb->ccb_h.path);
2677 isp_prt(isp, ISP_LOGINFO,
2678 "cam completion status 0x%x", sccb->ccb_h.status);
2681 XS_CMD_S_DONE(sccb);
/* Only cancel the timeout if the watchdog isn't mid-flight. */
2682 if (XS_CMD_WDOG_P(sccb) == 0) {
2683 untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2684 if (XS_CMD_GRACE_P(sccb)) {
2685 isp_prt(isp, ISP_LOGDEBUG2,
2686 "finished command on borrowed time");
2688 XS_CMD_S_CLEAR(sccb);
2689 ISPLOCK_2_CAMLOCK(isp);
2690 xpt_done((union ccb *) sccb);
2691 CAMLOCK_2_ISPLOCK(isp);
/*
 * isp_async --
 *	Platform (FreeBSD/CAM) handler for asynchronous events posted by
 *	the common QLogic core code.  'cmd' selects the event and 'arg'
 *	carries an event-specific payload, described per case below.
 *
 * NOTE(review): this listing is elided in places (some braces, 'break's,
 * case labels and statements are not visible); the comments below describe
 * only what the visible code demonstrates.
 */
2696 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
/*
 * Newly negotiated transfer parameters for a parallel-SCSI target.
 * 'arg' points to an int carrying the bus number in bits 16..31 and
 * the target id in the low bits (tgt is presumably masked in elided
 * code before use as a target id -- TODO confirm).  Build a
 * ccb_trans_settings from the active device parameters and broadcast
 * AC_TRANSFER_NEG to CAM on a temporary wildcard-LUN path.
 */
2700 case ISPASYNC_NEW_TGT_PARAMS:
2702 #ifdef CAM_NEW_TRAN_CODE
2703 struct ccb_trans_settings_scsi *scsi;
2704 struct ccb_trans_settings_spi *spi;
2707 sdparam *sdp = isp->isp_param;
2708 struct ccb_trans_settings cts;
2709 struct cam_path *tmppath;
/* Start from a zeroed CTS so unset 'valid' bits stay clear. */
2711 bzero(&cts, sizeof (struct ccb_trans_settings));
2713 tgt = *((int *)arg);
2714 bus = (tgt >> 16) & 0xffff;
/* Bus 1 lives on the second SIM on dual-bus adapters. */
2717 ISPLOCK_2_CAMLOCK(isp);
2718 if (xpt_create_path(&tmppath, NULL,
2719 cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2720 tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2721 CAMLOCK_2_ISPLOCK(isp);
2722 isp_prt(isp, ISP_LOGWARN,
2723 "isp_async cannot make temp path for %d.%d",
2728 CAMLOCK_2_ISPLOCK(isp);
/* Snapshot the currently-active negotiated flags for this device. */
2729 flags = sdp->isp_devparam[tgt].actv_flags;
/* New transport code: fill protocol (SCSI) and transport (SPI) blocks. */
2730 #ifdef CAM_NEW_TRAN_CODE
2731 cts.type = CTS_TYPE_CURRENT_SETTINGS;
2732 cts.protocol = PROTO_SCSI;
2733 cts.transport = XPORT_SPI;
2735 scsi = &cts.proto_specific.scsi;
2736 spi = &cts.xport_specific.spi;
/* Tagged queueing state. */
2738 if (flags & DPARM_TQING) {
2739 scsi->valid |= CTS_SCSI_VALID_TQ;
2740 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2741 spi->flags |= CTS_SPI_FLAGS_TAG_ENB;
/* Disconnect privilege. */
2744 if (flags & DPARM_DISC) {
2745 spi->valid |= CTS_SPI_VALID_DISC;
2746 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
/*
 * NOTE(review): CTS_SPI_VALID_BUS_WIDTH is a 'valid' bit, yet here it
 * is OR'd into spi->flags; the DISC and SYNC code in this same branch
 * sets spi->valid.  This looks like it should read
 * "spi->valid |= CTS_SPI_VALID_BUS_WIDTH;" -- verify and fix.
 */
2748 spi->flags |= CTS_SPI_VALID_BUS_WIDTH;
2749 if (flags & DPARM_WIDE) {
2750 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2752 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/* Synchronous transfer: report the active period/offset. */
2754 if (flags & DPARM_SYNC) {
2755 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2756 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2757 spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2758 spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
/* Legacy (pre-CAM_NEW_TRAN_CODE) path: equivalent settings via the
 * old flat ccb_trans_settings fields. */
2761 cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2762 cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2763 if (flags & DPARM_DISC) {
2764 cts.flags |= CCB_TRANS_DISC_ENB;
2766 if (flags & DPARM_TQING) {
2767 cts.flags |= CCB_TRANS_TAG_ENB;
2769 cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
/*
 * NOTE(review): this ternary looks inverted -- DPARM_WIDE selects the
 * 8-bit width here, whereas the CAM_NEW_TRAN_CODE branch above maps
 * DPARM_WIDE to MSG_EXT_WDTR_BUS_16_BIT.  Presumably should read
 * "MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT" -- verify and fix.
 */
2770 cts.bus_width = (flags & DPARM_WIDE)?
2771 MSG_EXT_WDTR_BUS_8_BIT : MSG_EXT_WDTR_BUS_16_BIT;
2772 cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2773 cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2774 if (flags & DPARM_SYNC) {
2776 CCB_TRANS_SYNC_RATE_VALID |
2777 CCB_TRANS_SYNC_OFFSET_VALID;
2780 isp_prt(isp, ISP_LOGDEBUG2,
2781 "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2782 bus, tgt, sdp->isp_devparam[tgt].actv_period,
2783 sdp->isp_devparam[tgt].actv_offset, flags);
/* Announce the negotiation result to CAM, then drop the temp path. */
2784 xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2785 ISPLOCK_2_CAMLOCK(isp);
2786 xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2787 xpt_free_path(tmppath);
2788 CAMLOCK_2_ISPLOCK(isp);
/*
 * SCSI bus reset observed: 'arg' points to the bus number.  Forward
 * AC_BUS_RESET on the matching CAM path (bus > 0 uses isp_path2).
 */
2791 case ISPASYNC_BUS_RESET:
2792 bus = *((int *)arg);
2793 isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2795 if (bus > 0 && isp->isp_path2) {
2796 ISPLOCK_2_CAMLOCK(isp);
2797 xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2798 CAMLOCK_2_ISPLOCK(isp);
2799 } else if (isp->isp_path) {
2800 ISPLOCK_2_CAMLOCK(isp);
2801 xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2802 CAMLOCK_2_ISPLOCK(isp);
/*
 * FC link transients (this arm belongs to the LIP case; its label is
 * elided above): LIP, Loop Reset and Loop Down all freeze the CAM
 * queues via isp_freeze_loopdown() until loop state is re-evaluated.
 */
2806 if (isp->isp_path) {
2807 isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2809 isp_prt(isp, ISP_LOGINFO, "LIP Received");
2811 case ISPASYNC_LOOP_RESET:
2812 if (isp->isp_path) {
2813 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2815 isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2817 case ISPASYNC_LOOP_DOWN:
2818 if (isp->isp_path) {
2819 isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2821 isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2823 case ISPASYNC_LOOP_UP:
2825 * Now we just note that Loop has come up. We don't
2826 * actually do anything because we're waiting for a
2827 * Change Notify before activating the FC cleanup
2828 * thread to look at the state of the loop again.
2830 isp_prt(isp, ISP_LOGINFO, "Loop UP");
/*
 * A local-loop port arrived or departed ('arg' points to its portdb
 * index).  Log the port's identity, then tell CAM about it -- but only
 * for ports with the target role (see policy comment below).
 */
2832 case ISPASYNC_PROMENADE:
2834 struct cam_path *tmppath;
2835 const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2836 "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2837 static const char *roles[4] = {
2838 "(none)", "Target", "Initiator", "Target/Initiator"
2840 fcparam *fcp = isp->isp_param;
2841 int tgt = *((int *) arg);
2842 int is_tgt_mask = (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT);
2843 struct lportdb *lp = &fcp->portdb[tgt];
/* 64-bit WWNs are printed as two 32-bit halves. */
2845 isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2846 roles[lp->roles & 0x3],
2847 (lp->valid)? "Arrived" : "Departed",
2848 (u_int32_t) (lp->port_wwn >> 32),
2849 (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2850 (u_int32_t) (lp->node_wwn >> 32),
2851 (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2853 ISPLOCK_2_CAMLOCK(isp);
2854 if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2855 (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2856 CAMLOCK_2_ISPLOCK(isp);
2860 * Policy: only announce targets.
/* Arrival vs. departure selection is in elided code between these
 * xpt_async calls -- presumably keyed off lp->valid; confirm. */
2862 if (lp->roles & is_tgt_mask) {
2864 xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2866 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2869 xpt_free_path(tmppath);
2870 CAMLOCK_2_ISPLOCK(isp);
/*
 * Port or Name Server database changed: log which, then kick the FC
 * kthread (cv_signal vs. wakeup presumably selected by an elided
 * version/#ifdef conditional -- confirm) to rescan loop state.
 */
2873 case ISPASYNC_CHANGE_NOTIFY:
2874 if (arg == ISPASYNC_CHANGE_PDB) {
2875 isp_prt(isp, ISP_LOGINFO,
2876 "Port Database Changed");
2877 } else if (arg == ISPASYNC_CHANGE_SNS) {
2878 isp_prt(isp, ISP_LOGINFO,
2879 "Name Server Database Changed");
2882 cv_signal(&isp->isp_osinfo.kthread_cv);
2884 wakeup(&isp->isp_osinfo.kthread_cv);
/*
 * A fabric device was reported ('arg' is a struct lportdb snapshot).
 * Announce it, and if we are an initiator, find (or make) room for it
 * in the local port database; isp_pdb_sync does the real login work
 * later.
 */
2887 case ISPASYNC_FABRIC_DEV:
2889 int target, base, lim;
2890 fcparam *fcp = isp->isp_param;
2891 struct lportdb *lp = NULL;
2892 struct lportdb *clp = (struct lportdb *) arg;
/* 'pt' (the printable port-type name) is set by the elided switch. */
2895 switch (clp->port_type) {
2922 isp_prt(isp, ISP_LOGINFO,
2923 "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2926 * If we don't have an initiator role we bail.
2928 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2931 if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2936 * Is this entry for us? If so, we bail.
2939 if (fcp->isp_portid == clp->portid) {
2944 * Else, the default policy is to find room for it in
2945 * our local port database. Later, when we execute
2946 * the call to isp_pdb_sync either this newly arrived
2947 * or already logged in device will be (re)announced.
/* base/lim bound the usable portdb index range per topology. */
2950 if (fcp->isp_topo == TOPO_FL_PORT)
2955 if (fcp->isp_topo == TOPO_N_PORT)
2961 * Is it already in our list?
/* First pass: look for an existing entry with matching WWNs,
 * skipping the reserved well-known ids (FL_PORT_ID..FC_SNS_ID). */
2963 for (target = base; target < lim; target++) {
2964 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2967 lp = &fcp->portdb[target];
2968 if (lp->port_wwn == clp->port_wwn &&
2969 lp->node_wwn == clp->node_wwn) {
/* Second pass: find a free slot (port_wwn == 0) for a new entry. */
2977 for (target = base; target < lim; target++) {
2978 if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2981 lp = &fcp->portdb[target];
2982 if (lp->port_wwn == 0) {
2986 if (target == lim) {
2987 isp_prt(isp, ISP_LOGWARN,
2988 "out of space for fabric devices");
/* Copy the reported identity into the claimed portdb slot. */
2991 lp->port_type = clp->port_type;
2992 lp->fc4_type = clp->fc4_type;
2993 lp->node_wwn = clp->node_wwn;
2994 lp->port_wwn = clp->port_wwn;
2995 lp->portid = clp->portid;
/* Target-mode events: log-and-dispatch to the platform tmode layer. */
2999 #ifdef ISP_TARGET_MODE
3000 case ISPASYNC_TARGET_MESSAGE:
3002 tmd_msg_t *mp = arg;
3003 isp_prt(isp, ISP_LOGALL,
3004 "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
3005 mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
3006 (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
3010 case ISPASYNC_TARGET_EVENT:
3012 tmd_event_t *ep = arg;
3013 isp_prt(isp, ISP_LOGALL,
3014 "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
/* Dispatch a target-mode response entry by its request-queue type. */
3017 case ISPASYNC_TARGET_ACTION:
3018 switch (((isphdr_t *)arg)->rqs_entry_type) {
3020 isp_prt(isp, ISP_LOGWARN,
3021 "event 0x%x for unhandled target action",
3022 ((isphdr_t *)arg)->rqs_entry_type);
/* Immediate notify: SCSI vs. FC entry layout (selector is elided). */
3024 case RQSTYPE_NOTIFY:
3026 rv = isp_handle_platform_notify_scsi(isp,
3027 (in_entry_t *) arg);
3029 rv = isp_handle_platform_notify_fc(isp,
3030 (in_fcentry_t *) arg);
3034 rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
3037 rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
3041 rv = isp_handle_platform_ctio(isp, arg);
/* Enable/Modify LUN completion: wake whoever queued the request. */
3043 case RQSTYPE_ENABLE_LUN:
3044 case RQSTYPE_MODIFY_LUN:
3045 if (IS_DUALBUS(isp)) {
3047 GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
3051 isp_cv_signal_rqe(isp, bus,
3052 ((lun_entry_t *)arg)->le_status);
/*
 * Firmware crash: capture the crash address from the outgoing
 * mailboxes (mailbox 6 carries the bus on dual-bus parts), log it,
 * optionally arrange a crash dump, and restart the chip.
 */
3057 case ISPASYNC_FW_CRASH:
3059 u_int16_t mbox1, mbox6;
3060 mbox1 = ISP_READ(isp, OUTMAILBOX1);
3061 if (IS_DUALBUS(isp)) {
3062 mbox6 = ISP_READ(isp, OUTMAILBOX6);
3066 isp_prt(isp, ISP_LOGERR,
3067 "Internal Firmware Error on bus %d @ RISC Address 0x%x",
3069 #ifdef ISP_FW_CRASH_DUMP
3071 * XXX: really need a thread to do this right.
/* Park FC state machines and freeze CAM until the dump/restart. */
3074 FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
3075 FCPARAM(isp)->isp_loopstate = LOOP_NIL;
3076 isp_freeze_loopdown(isp, "f/w crash");
/* Recursive notification that the firmware is back up. */
3080 isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3084 case ISPASYNC_UNHANDLED_RESPONSE:
/* Default: unknown event code -- log and carry on. */
3087 isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3095 * Locks are held before coming here.
/*
 * isp_uninit --
 *	Quiesce the adapter for detach/shutdown by writing a RISC reset
 *	command to the Host Command and Control Register.  Per the note
 *	above, the caller already holds the required locks.  (Presumably
 *	this halts firmware processing so no further interrupts or DMA
 *	are posted -- confirm against the HCCR register description.)
 */
3098 isp_uninit(struct ispsoftc *isp)
3100 ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
3105 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
3108 if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3111 printf("%s: ", device_get_nameunit(isp->isp_dev));