2 * Copyright (c) 2003 Hidetoshi Shimokawa
3 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the acknowledgement as bellow:
17 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
26 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/module.h>
42 #include <sys/kernel.h>
43 #include <sys/sysctl.h>
44 #include <machine/bus.h>
45 #include <sys/malloc.h>
46 #if defined(__FreeBSD__) && __FreeBSD_version >= 501102
48 #include <sys/mutex.h>
51 #if defined(__DragonFly__) || __FreeBSD_version < 500106
52 #include <sys/devicestat.h> /* for struct devstat */
56 #include <bus/cam/cam.h>
57 #include <bus/cam/cam_ccb.h>
58 #include <bus/cam/cam_sim.h>
59 #include <bus/cam/cam_xpt_sim.h>
60 #include <bus/cam/cam_debug.h>
61 #include <bus/cam/cam_periph.h>
62 #include <bus/cam/scsi/scsi_all.h>
64 #include <bus/firewire/firewire.h>
65 #include <bus/firewire/firewirereg.h>
66 #include <bus/firewire/fwdma.h>
67 #include <bus/firewire/iec13213.h>
71 #include <cam/cam_ccb.h>
72 #include <cam/cam_sim.h>
73 #include <cam/cam_xpt_sim.h>
74 #include <cam/cam_debug.h>
75 #include <cam/cam_periph.h>
76 #include <cam/scsi/scsi_all.h>
78 #include <dev/firewire/firewire.h>
79 #include <dev/firewire/firewirereg.h>
80 #include <dev/firewire/fwdma.h>
81 #include <dev/firewire/iec13213.h>
82 #include <dev/firewire/sbp.h>
85 #define ccb_sdev_ptr spriv_ptr0
86 #define ccb_sbp_ptr spriv_ptr1
88 #define SBP_NUM_TARGETS 8 /* MAX 64 */
90 * Scan_bus doesn't work for more than 8 LUNs
91 * because of CAM_SCSI2_MAXLUN in cam_xpt.c
93 #define SBP_NUM_LUNS 64
94 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */)
95 #define SBP_DMA_SIZE PAGE_SIZE
96 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res)
97 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
98 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS)
101 * STATUS FIFO addressing
103 * -----------------------
104 * 0- 1( 2): 0 (alignment)
107 * 16-31( 8): reserved
108 * 32-47(16): SBP_BIND_HI
109 * 48-64(16): bus_id, node_id
/* Fixed high 16 bits of the status FIFO address (see layout above). */
111 #define SBP_BIND_HI 0x1
/*
 * Pack (target, lun) into a status FIFO address: target id in bits 2-7,
 * lun in bits 8-15, SBP_BIND_HI in bits 32-47.  Bits 0-1 stay zero for
 * quadlet alignment.
 */
112 #define SBP_DEV2ADDR(t, l) \
113 (((u_int64_t)SBP_BIND_HI << 32) \
114 | (((l) & 0xff) << 8) \
115 | (((t) & 0x3f) << 2))
/* Recover the 6-bit target id from a status FIFO address. */
116 #define SBP_ADDR2TRG(a) (((a) >> 2) & 0x3f)
/* Recover the 8-bit lun from a status FIFO address. */
117 #define SBP_ADDR2LUN(a) (((a) >> 8) & 0xff)
118 #define SBP_INITIATOR 7
120 static char *orb_fun_name[] = {
124 static int debug = 0;
125 static int auto_login = 1;
126 static int max_speed = -1;
127 static int sbp_cold = 1;
128 static int ex_login = 1;
129 static int login_delay = 1000; /* msec */
130 static int scan_delay = 500; /* msec */
131 static int use_doorbell = 0;
132 static int sbp_tags = 0;
134 SYSCTL_DECL(_hw_firewire);
135 static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0,
137 SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0,
139 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0,
140 "SBP perform login automatically");
141 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RW, &max_speed, 0,
142 "SBP transfer max speed");
143 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RW,
144 &ex_login, 0, "SBP enable exclusive login");
145 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RW,
146 &login_delay, 0, "SBP login delay in msec");
147 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RW,
148 &scan_delay, 0, "SBP scan delay in msec");
149 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RW,
150 &use_doorbell, 0, "SBP use doorbell request");
151 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RW, &sbp_tags, 0,
152 "SBP tagged queuing support");
154 TUNABLE_INT("hw.firewire.sbp.auto_login", &auto_login);
155 TUNABLE_INT("hw.firewire.sbp.max_speed", &max_speed);
156 TUNABLE_INT("hw.firewire.sbp.exclusive_login", &ex_login);
157 TUNABLE_INT("hw.firewire.sbp.login_delay", &login_delay);
158 TUNABLE_INT("hw.firewire.sbp.scan_delay", &scan_delay);
159 TUNABLE_INT("hw.firewire.sbp.use_doorbell", &use_doorbell);
160 TUNABLE_INT("hw.firewire.sbp.tags", &sbp_tags);
162 #define NEED_RESPONSE 0
164 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)
165 #ifdef __sparc64__ /* iommu */
166 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX)
168 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE)
171 STAILQ_ENTRY(sbp_ocb) ocb;
175 #define IND_PTR_OFFSET (8*sizeof(uint32_t))
176 struct ind_ptr ind_ptr[SBP_IND_MAX];
177 struct sbp_dev *sdev;
178 int flags; /* XXX should be removed */
182 #define OCB_ACT_MGM 0
183 #define OCB_ACT_CMD 1
184 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo))
187 #define SBP_DEV_RESET 0 /* accept login */
188 #define SBP_DEV_LOGIN 1 /* to login */
190 #define SBP_DEV_RECONN 2 /* to reconnect */
192 #define SBP_DEV_TOATTACH 3 /* to attach */
193 #define SBP_DEV_PROBE 4 /* scan lun */
194 #define SBP_DEV_ATTACHED 5 /* in operation */
195 #define SBP_DEV_DEAD 6 /* unavailable unit */
196 #define SBP_DEV_RETRY 7 /* unavailable unit */
202 #define ORB_LINK_DEAD (1 << 0)
203 #define VALID_LUN (1 << 1)
204 #define ORB_POINTER_ACTIVE (1 << 2)
205 #define ORB_POINTER_NEED (1 << 3)
206 #define ORB_DOORBELL_ACTIVE (1 << 4)
207 #define ORB_DOORBELL_NEED (1 << 5)
208 #define ORB_SHORTAGE (1 << 6)
210 struct cam_path *path;
211 struct sbp_target *target;
212 struct fwdma_alloc dma;
213 struct sbp_login_res *login;
214 struct callout login_callout;
216 STAILQ_HEAD(, sbp_ocb) ocbs;
217 STAILQ_HEAD(, sbp_ocb) free_ocbs;
218 struct sbp_ocb *last_ocb;
228 struct sbp_dev **luns;
229 struct sbp_softc *sbp;
230 struct fw_device *fwdev;
231 uint32_t mgm_hi, mgm_lo;
232 struct sbp_ocb *mgm_ocb_cur;
233 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue;
234 struct callout mgm_ocb_timeout;
235 struct callout scan_callout;
236 STAILQ_HEAD(, fw_xfer) xferlist;
241 struct firewire_dev_comm fd;
243 struct cam_path *path;
244 struct sbp_target targets[SBP_NUM_TARGETS];
247 struct timeval last_busreset;
248 #define SIMQ_FREEZED 1
/* Acquire/release the per-controller softc mutex. */
252 #define SBP_LOCK(sbp) mtx_lock(&(sbp)->mtx)
253 #define SBP_UNLOCK(sbp) mtx_unlock(&(sbp)->mtx)
255 static void sbp_post_explore (void *);
256 static void sbp_recv (struct fw_xfer *);
257 static void sbp_mgm_callback (struct fw_xfer *);
259 static void sbp_cmd_callback (struct fw_xfer *);
261 static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *);
262 static void sbp_doorbell(struct sbp_dev *);
263 static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int);
264 static void sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *);
265 static void sbp_abort_ocb (struct sbp_ocb *, int);
266 static void sbp_abort_all_ocbs (struct sbp_dev *, int);
267 static struct fw_xfer * sbp_write_cmd_locked (struct sbp_dev *, int, int);
268 static struct fw_xfer * sbp_write_cmd (struct sbp_dev *, int, int);
269 static struct sbp_ocb * sbp_get_ocb (struct sbp_dev *);
270 static struct sbp_ocb * sbp_enqueue_ocb (struct sbp_dev *, struct sbp_ocb *);
271 static struct sbp_ocb * sbp_dequeue_ocb (struct sbp_dev *, struct sbp_status *);
272 static void sbp_cam_detach_sdev(struct sbp_dev *);
273 static void sbp_free_sdev(struct sbp_dev *);
274 static void sbp_cam_detach_target (struct sbp_target *);
275 static void sbp_free_target (struct sbp_target *);
276 static void sbp_mgm_timeout (void *arg);
277 static void sbp_timeout (void *arg);
278 static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *);
280 static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire");
282 /* cam related functions */
283 static void sbp_action(struct cam_sim *sim, union ccb *ccb);
284 static void sbp_poll(struct cam_sim *sim);
285 static void sbp_cam_scan_lun(struct cam_periph *, union ccb *);
286 static void sbp_cam_scan_target(void *arg);
288 static char *orb_status0[] = {
289 /* 0 */ "No additional information to report",
290 /* 1 */ "Request type not supported",
291 /* 2 */ "Speed not supported",
292 /* 3 */ "Page size not supported",
293 /* 4 */ "Access denied",
294 /* 5 */ "Logical unit not supported",
295 /* 6 */ "Maximum payload too small",
296 /* 7 */ "Reserved for future standardization",
297 /* 8 */ "Resources unavailable",
298 /* 9 */ "Function rejected",
299 /* A */ "Login ID not recognized",
300 /* B */ "Dummy ORB completed",
301 /* C */ "Request aborted",
302 /* FF */ "Unspecified error"
303 #define MAX_ORB_STATUS0 0xd
306 static char *orb_status1_object[] = {
307 /* 0 */ "Operation request block (ORB)",
308 /* 1 */ "Data buffer",
309 /* 2 */ "Page table",
310 /* 3 */ "Unable to specify"
313 static char *orb_status1_serial_bus_error[] = {
314 /* 0 */ "Missing acknowledge",
315 /* 1 */ "Reserved; not to be used",
316 /* 2 */ "Time-out error",
317 /* 3 */ "Reserved; not to be used",
318 /* 4 */ "Busy retry limit exceeded(X)",
319 /* 5 */ "Busy retry limit exceeded(A)",
320 /* 6 */ "Busy retry limit exceeded(B)",
321 /* 7 */ "Reserved for future standardization",
322 /* 8 */ "Reserved for future standardization",
323 /* 9 */ "Reserved for future standardization",
324 /* A */ "Reserved for future standardization",
325 /* B */ "Tardy retry limit exceeded",
326 /* C */ "Conflict error",
327 /* D */ "Data error",
328 /* E */ "Type error",
329 /* F */ "Address error"
333 sbp_identify(driver_t *driver, device_t parent)
336 printf("sbp_identify\n");
339 BUS_ADD_CHILD(parent, 0, "sbp", device_get_unit(parent));
346 sbp_probe(device_t dev)
351 printf("sbp_probe\n");
354 pa = device_get_parent(dev);
355 if(device_get_unit(dev) != device_get_unit(pa)){
359 device_set_desc(dev, "SBP-2/SCSI over FireWire");
370 * Display device characteristics on the console
373 sbp_show_sdev_info(struct sbp_dev *sdev)
375 struct fw_device *fwdev;
377 fwdev = sdev->target->fwdev;
378 device_printf(sdev->target->sbp->fd.dev,
379 "%s: %s: ordered:%d type:%d EUI:%08x%08x node:%d "
380 "speed:%d maxrec:%d\n",
383 (sdev->type & 0x40) >> 6,
391 device_printf(sdev->target->sbp->fd.dev,
392 "%s: %s '%s' '%s' '%s'\n",
405 /* Bus Target EUI64 */
407 {0, 2, {0x00018ea0, 0x01fd0154}}, /* Logitec HDD */
408 {0, 0, {0x00018ea6, 0x00100682}}, /* Logitec DVD */
409 {0, 1, {0x00d03200, 0xa412006a}}, /* Yano HDD */
415 sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev)
417 int bus, i, target=-1;
418 char w[SBP_NUM_TARGETS];
421 bus = device_get_unit(sbp->fd.dev);
423 /* XXX wired-down configuration should be gotten from
424 tunable or device hint */
425 for (i = 0; wired[i].bus >= 0; i ++) {
426 if (wired[i].bus == bus) {
427 w[wired[i].target] = 1;
428 if (wired[i].eui.hi == fwdev->eui.hi &&
429 wired[i].eui.lo == fwdev->eui.lo)
430 target = wired[i].target;
434 if(target < SBP_NUM_TARGETS &&
435 sbp->targets[target].fwdev == NULL)
437 device_printf(sbp->fd.dev,
438 "target %d is not free for %08x:%08x\n",
439 target, fwdev->eui.hi, fwdev->eui.lo);
442 /* non-wired target */
443 for (i = 0; i < SBP_NUM_TARGETS; i ++)
444 if (sbp->targets[i].fwdev == NULL && w[i] == 0) {
453 sbp_alloc_lun(struct sbp_target *target)
455 struct crom_context cc;
457 struct sbp_dev *sdev, **newluns;
458 struct sbp_softc *sbp;
462 crom_init_context(&cc, target->fwdev->csrrom);
463 /* XXX should parse appropriate unit directories only */
465 while (cc.depth >= 0) {
466 reg = crom_search_key(&cc, CROM_LUN);
469 lun = reg->val & 0xffff;
471 printf("target %d lun %d found\n", target->target_id, lun);
478 printf("%s:%d no LUN found\n",
479 device_get_nameunit(target->sbp->fd.dev),
483 if (maxlun >= SBP_NUM_LUNS)
484 maxlun = SBP_NUM_LUNS;
486 /* Invalidate stale devices */
487 for (lun = 0; lun < target->num_lun; lun ++) {
488 sdev = target->luns[lun];
491 sdev->flags &= ~VALID_LUN;
494 sbp_cam_detach_sdev(sdev);
496 target->luns[lun] = NULL;
501 if (maxlun != target->num_lun) {
502 newluns = (struct sbp_dev **) realloc(target->luns,
503 sizeof(struct sbp_dev *) * maxlun,
504 M_SBP, M_NOWAIT | M_ZERO);
506 if (newluns == NULL) {
507 printf("%s: realloc failed\n", __func__);
508 newluns = target->luns;
509 maxlun = target->num_lun;
513 * We must zero the extended region for the case
514 * realloc() doesn't allocate new buffer.
516 if (maxlun > target->num_lun)
517 bzero(&newluns[target->num_lun],
518 sizeof(struct sbp_dev *) *
519 (maxlun - target->num_lun));
521 target->luns = newluns;
522 target->num_lun = maxlun;
525 crom_init_context(&cc, target->fwdev->csrrom);
526 while (cc.depth >= 0) {
529 reg = crom_search_key(&cc, CROM_LUN);
532 lun = reg->val & 0xffff;
533 if (lun >= SBP_NUM_LUNS) {
534 printf("too large lun %d\n", lun);
538 sdev = target->luns[lun];
540 sdev = malloc(sizeof(struct sbp_dev),
541 M_SBP, M_NOWAIT | M_ZERO);
543 printf("%s: malloc failed\n", __func__);
546 target->luns[lun] = sdev;
548 sdev->target = target;
549 STAILQ_INIT(&sdev->ocbs);
550 CALLOUT_INIT(&sdev->login_callout);
551 sdev->status = SBP_DEV_RESET;
553 snprintf(sdev->bustgtlun, 32, "%s:%d:%d",
554 device_get_nameunit(sdev->target->sbp->fd.dev),
555 sdev->target->target_id,
558 sdev->flags |= VALID_LUN;
559 sdev->type = (reg->val & 0xff0000) >> 16;
564 fwdma_malloc(sbp->fd.fc,
565 /* alignment */ sizeof(uint32_t),
566 SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT |
568 if (sdev->dma.v_addr == NULL) {
569 printf("%s: dma space allocation failed\n",
572 target->luns[lun] = NULL;
575 sdev->login = (struct sbp_login_res *) sdev->dma.v_addr;
576 sdev->ocb = (struct sbp_ocb *)
577 ((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE);
578 bzero((char *)sdev->ocb,
579 sizeof (struct sbp_ocb) * SBP_QUEUE_LEN);
581 STAILQ_INIT(&sdev->free_ocbs);
582 for (i = 0; i < SBP_QUEUE_LEN; i++) {
585 ocb->bus_addr = sdev->dma.bus_addr
587 + sizeof(struct sbp_ocb) * i
588 + offsetof(struct sbp_ocb, orb[0]);
589 if (bus_dmamap_create(sbp->dmat, 0, &ocb->dmamap)) {
590 printf("sbp_attach: cannot create dmamap\n");
594 sbp_free_ocb(sdev, ocb);
600 for (lun = 0; lun < target->num_lun; lun ++) {
601 sdev = target->luns[lun];
602 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
603 sbp_cam_detach_sdev(sdev);
605 target->luns[lun] = NULL;
610 static struct sbp_target *
611 sbp_alloc_target(struct sbp_softc *sbp, struct fw_device *fwdev)
614 struct sbp_target *target;
615 struct crom_context cc;
619 printf("sbp_alloc_target\n");
621 i = sbp_new_target(sbp, fwdev);
623 device_printf(sbp->fd.dev, "increase SBP_NUM_TARGETS!\n");
627 target = &sbp->targets[i];
629 target->fwdev = fwdev;
630 target->target_id = i;
631 /* XXX we may want to reload mgm port after each bus reset */
632 /* XXX there might be multiple management agents */
633 crom_init_context(&cc, target->fwdev->csrrom);
634 reg = crom_search_key(&cc, CROM_MGM);
635 if (reg == NULL || reg->val == 0) {
636 printf("NULL management address\n");
637 target->fwdev = NULL;
640 target->mgm_hi = 0xffff;
641 target->mgm_lo = 0xf0000000 | (reg->val << 2);
642 target->mgm_ocb_cur = NULL;
644 printf("target:%d mgm_port: %x\n", i, target->mgm_lo);
646 STAILQ_INIT(&target->xferlist);
648 STAILQ_INIT(&target->mgm_ocb_queue);
649 CALLOUT_INIT(&target->mgm_ocb_timeout);
650 CALLOUT_INIT(&target->scan_callout);
658 sbp_probe_lun(struct sbp_dev *sdev)
660 struct fw_device *fwdev;
661 struct crom_context c, *cc = &c;
664 bzero(sdev->vendor, sizeof(sdev->vendor));
665 bzero(sdev->product, sizeof(sdev->product));
667 fwdev = sdev->target->fwdev;
668 crom_init_context(cc, fwdev->csrrom);
669 /* get vendor string */
670 crom_search_key(cc, CSRKEY_VENDOR);
672 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor));
673 /* skip to the unit directory for SBP-2 */
674 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) {
675 if (reg->val == CSRVAL_T10SBP2)
679 /* get firmware revision */
680 reg = crom_search_key(cc, CSRKEY_FIRM_VER);
682 snprintf(sdev->revision, sizeof(sdev->revision),
684 /* get product string */
685 crom_search_key(cc, CSRKEY_MODEL);
687 crom_parse_text(cc, sdev->product, sizeof(sdev->product));
691 sbp_login_callout(void *arg)
693 struct sbp_dev *sdev = (struct sbp_dev *)arg;
694 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL);
698 sbp_login(struct sbp_dev *sdev)
700 struct timeval delta;
705 timevalsub(&delta, &sdev->target->sbp->last_busreset);
706 t.tv_sec = login_delay / 1000;
707 t.tv_usec = (login_delay % 1000) * 1000;
708 timevalsub(&t, &delta);
709 if (t.tv_sec >= 0 && t.tv_usec > 0)
710 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000;
712 printf("%s: sec = %jd usec = %ld ticks = %d\n", __func__,
713 (intmax_t)t.tv_sec, t.tv_usec, ticks);
715 callout_reset(&sdev->login_callout, ticks,
716 sbp_login_callout, (void *)(sdev));
/*
 * A firewire device counts as alive when it is attached and its config
 * ROM advertises the ANSI T10 SBP-2 spec version.
 */
719 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \
720 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2))
723 sbp_probe_target(void *arg)
725 struct sbp_target *target = (struct sbp_target *)arg;
726 struct sbp_softc *sbp = target->sbp;
727 struct sbp_dev *sdev;
730 alive = SBP_FWDEV_ALIVE(target->fwdev);
732 device_printf(sbp->fd.dev, "%s %d%salive\n",
733 __func__, target->target_id,
734 (!alive) ? " not " : "");
738 sbp_alloc_lun(target);
740 /* XXX untimeout mgm_ocb and dequeue */
741 for (i=0; i < target->num_lun; i++) {
742 sdev = target->luns[i];
745 if (alive && (sdev->status != SBP_DEV_DEAD)) {
746 if (sdev->path != NULL) {
748 xpt_freeze_devq(sdev->path, 1);
753 sbp_show_sdev_info(sdev);
755 sbp_abort_all_ocbs(sdev, CAM_SCSI_BUS_RESET);
756 switch (sdev->status) {
758 /* new or revived target */
762 case SBP_DEV_TOATTACH:
764 case SBP_DEV_ATTACHED:
767 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL);
771 switch (sdev->status) {
772 case SBP_DEV_ATTACHED:
774 /* the device has gone */
775 device_printf(sbp->fd.dev, "%s: lost target\n",
780 xpt_freeze_devq(sdev->path, 1);
784 sdev->status = SBP_DEV_RETRY;
785 sbp_cam_detach_sdev(sdev);
787 target->luns[i] = NULL;
790 case SBP_DEV_TOATTACH:
791 sdev->status = SBP_DEV_RESET;
803 sbp_post_busreset(void *arg)
805 struct sbp_softc *sbp;
807 sbp = (struct sbp_softc *)arg;
809 printf("sbp_post_busreset\n");
811 if ((sbp->sim->flags & SIMQ_FREEZED) == 0) {
813 xpt_freeze_simq(sbp->sim, /*count*/1);
814 sbp->sim->flags |= SIMQ_FREEZED;
817 microtime(&sbp->last_busreset);
821 sbp_post_explore(void *arg)
823 struct sbp_softc *sbp = (struct sbp_softc *)arg;
824 struct sbp_target *target;
825 struct fw_device *fwdev;
829 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold);
831 /* We need physical access */
832 if (!firewire_phydma_enable)
840 * XXX don't let CAM rest the bus.
841 * CAM tries to do something with freezed (DEV_RETRY) devices.
843 xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL);
846 /* Garbage Collection */
847 for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
848 target = &sbp->targets[i];
849 STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link)
850 if (target->fwdev == NULL || target->fwdev == fwdev)
853 /* device has been removed in lower driver */
854 sbp_cam_detach_target(target);
855 sbp_free_target(target);
858 /* traverse device list */
859 STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) {
861 device_printf(sbp->fd.dev,"%s:: EUI:%08x%08x %s attached, state=%d\n",
862 __func__, fwdev->eui.hi, fwdev->eui.lo,
863 (fwdev->status != FWDEVATTACHED) ? "not" : "",
866 alive = SBP_FWDEV_ALIVE(fwdev);
867 for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
868 target = &sbp->targets[i];
869 if(target->fwdev == fwdev ) {
874 if(i == SBP_NUM_TARGETS){
877 target = sbp_alloc_target(sbp, fwdev);
884 sbp_probe_target((void *)target);
885 if (target->num_lun == 0)
886 sbp_free_target(target);
889 xpt_release_simq(sbp->sim, /*run queue*/TRUE);
890 sbp->sim->flags &= ~SIMQ_FREEZED;
896 sbp_loginres_callback(struct fw_xfer *xfer){
898 struct sbp_dev *sdev;
899 sdev = (struct sbp_dev *)xfer->sc;
901 device_printf(sdev->target->sbp->fd.dev,"%s\n", __func__);
905 STAILQ_INSERT_TAIL(&sdev->target->sbp->fwb.xferlist, xfer, link);
912 sbp_xfer_free(struct fw_xfer *xfer)
914 struct sbp_dev *sdev;
917 sdev = (struct sbp_dev *)xfer->sc;
918 fw_xfer_unload(xfer);
920 SBP_LOCK(sdev->target->sbp);
921 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link);
922 SBP_UNLOCK(sdev->target->sbp);
927 sbp_reset_start_callback(struct fw_xfer *xfer)
929 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc;
930 struct sbp_target *target = sdev->target;
933 if (xfer->resp != 0) {
934 device_printf(sdev->target->sbp->fd.dev,
935 "%s: %s failed: resp=%d\n", __func__, sdev->bustgtlun, xfer->resp);
938 for (i = 0; i < target->num_lun; i++) {
939 tsdev = target->luns[i];
940 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN)
946 sbp_reset_start(struct sbp_dev *sdev)
948 struct fw_xfer *xfer;
952 device_printf(sdev->target->sbp->fd.dev,
953 "%s:%s\n", __func__,sdev->bustgtlun);
956 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
957 xfer->hand = sbp_reset_start_callback;
958 fp = &xfer->send.hdr;
959 fp->mode.wreqq.dest_hi = 0xffff;
960 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START;
961 fp->mode.wreqq.data = htonl(0xf);
962 fw_asyreq(xfer->fc, -1, xfer);
966 sbp_mgm_callback(struct fw_xfer *xfer)
968 struct sbp_dev *sdev;
971 sdev = (struct sbp_dev *)xfer->sc;
974 device_printf(sdev->target->sbp->fd.dev,
975 "%s:%s\n", __func__, sdev->bustgtlun);
982 static struct sbp_dev *
983 sbp_next_dev(struct sbp_target *target, int lun)
985 struct sbp_dev **sdevp;
988 for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun;
990 if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE)
997 sbp_cam_scan_lun(struct cam_periph *periph, union ccb *ccb)
999 struct sbp_target *target;
1000 struct sbp_dev *sdev;
1002 sdev = (struct sbp_dev *) ccb->ccb_h.ccb_sdev_ptr;
1003 target = sdev->target;
1005 device_printf(sdev->target->sbp->fd.dev,
1006 "%s:%s\n", __func__, sdev->bustgtlun);
1008 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1009 sdev->status = SBP_DEV_ATTACHED;
1011 device_printf(sdev->target->sbp->fd.dev,
1012 "%s:%s failed\n", __func__, sdev->bustgtlun);
1014 sdev = sbp_next_dev(target, sdev->lun_id + 1);
1020 xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
1021 ccb->ccb_h.ccb_sdev_ptr = sdev;
1023 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
1028 sbp_cam_scan_target(void *arg)
1030 struct sbp_target *target = (struct sbp_target *)arg;
1031 struct sbp_dev *sdev;
1034 sdev = sbp_next_dev(target, 0);
1036 printf("sbp_cam_scan_target: nothing to do for target%d\n",
1041 device_printf(sdev->target->sbp->fd.dev,
1042 "%s:%s\n", __func__, sdev->bustgtlun);
1044 ccb = malloc(sizeof(union ccb), M_SBP, M_NOWAIT | M_ZERO);
1046 printf("sbp_cam_scan_target: malloc failed\n");
1049 xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
1050 ccb->ccb_h.func_code = XPT_SCAN_LUN;
1051 ccb->ccb_h.cbfcnp = sbp_cam_scan_lun;
1052 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1053 ccb->crcn.flags = CAM_FLAG_NONE;
1054 ccb->ccb_h.ccb_sdev_ptr = sdev;
1056 /* The scan is in progress now. */
1057 SBP_LOCK(target->sbp);
1059 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
1061 SBP_UNLOCK(target->sbp);
1064 static __inline void
1065 sbp_scan_dev(struct sbp_dev *sdev)
1067 sdev->status = SBP_DEV_PROBE;
1068 callout_reset(&sdev->target->scan_callout, scan_delay * hz / 1000,
1069 sbp_cam_scan_target, (void *)sdev->target);
1073 sbp_do_attach(struct fw_xfer *xfer)
1075 struct sbp_dev *sdev;
1076 struct sbp_target *target;
1077 struct sbp_softc *sbp;
1079 sdev = (struct sbp_dev *)xfer->sc;
1080 target = sdev->target;
1083 device_printf(sdev->target->sbp->fd.dev,
1084 "%s:%s\n", __func__, sdev->bustgtlun);
1086 sbp_xfer_free(xfer);
1088 if (sdev->path == NULL)
1089 xpt_create_path(&sdev->path, NULL,
1090 cam_sim_path(target->sbp->sim),
1091 target->target_id, sdev->lun_id);
1094 * Let CAM scan the bus if we are in the boot process.
1095 * XXX xpt_scan_bus cannot detect LUN larger than 0
1096 * if LUN 0 doesn't exists.
1099 sdev->status = SBP_DEV_ATTACHED;
1108 sbp_agent_reset_callback(struct fw_xfer *xfer)
1110 struct sbp_dev *sdev;
1112 sdev = (struct sbp_dev *)xfer->sc;
1114 device_printf(sdev->target->sbp->fd.dev,
1115 "%s:%s\n", __func__, sdev->bustgtlun);
1117 if (xfer->resp != 0) {
1118 device_printf(sdev->target->sbp->fd.dev,
1119 "%s:%s resp=%d\n", __func__, sdev->bustgtlun, xfer->resp);
1122 sbp_xfer_free(xfer);
1124 SBP_LOCK(sdev->target->sbp);
1125 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
1127 SBP_UNLOCK(sdev->target->sbp);
1132 sbp_agent_reset(struct sbp_dev *sdev)
1134 struct fw_xfer *xfer;
1138 device_printf(sdev->target->sbp->fd.dev,
1139 "%s:%s\n", __func__, sdev->bustgtlun);
1141 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04);
1144 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE)
1145 xfer->hand = sbp_agent_reset_callback;
1147 xfer->hand = sbp_do_attach;
1148 fp = &xfer->send.hdr;
1149 fp->mode.wreqq.data = htonl(0xf);
1150 fw_asyreq(xfer->fc, -1, xfer);
1151 sbp_abort_all_ocbs(sdev, CAM_BDR_SENT);
1155 sbp_busy_timeout_callback(struct fw_xfer *xfer)
1157 struct sbp_dev *sdev;
1159 sdev = (struct sbp_dev *)xfer->sc;
1161 device_printf(sdev->target->sbp->fd.dev,
1162 "%s:%s\n", __func__, sdev->bustgtlun);
1164 sbp_xfer_free(xfer);
1165 sbp_agent_reset(sdev);
1169 sbp_busy_timeout(struct sbp_dev *sdev)
1172 struct fw_xfer *xfer;
1174 device_printf(sdev->target->sbp->fd.dev,
1175 "%s:%s\n", __func__, sdev->bustgtlun);
1177 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1179 xfer->hand = sbp_busy_timeout_callback;
1180 fp = &xfer->send.hdr;
1181 fp->mode.wreqq.dest_hi = 0xffff;
1182 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
1183 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
1184 fw_asyreq(xfer->fc, -1, xfer);
1188 sbp_orb_pointer_callback(struct fw_xfer *xfer)
1190 struct sbp_dev *sdev;
1191 sdev = (struct sbp_dev *)xfer->sc;
1194 device_printf(sdev->target->sbp->fd.dev,
1195 "%s:%s\n", __func__, sdev->bustgtlun);
1197 if (xfer->resp != 0) {
1199 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1201 sbp_xfer_free(xfer);
1203 SBP_LOCK(sdev->target->sbp);
1204 sdev->flags &= ~ORB_POINTER_ACTIVE;
1206 if ((sdev->flags & ORB_POINTER_NEED) != 0) {
1207 struct sbp_ocb *ocb;
1209 sdev->flags &= ~ORB_POINTER_NEED;
1210 ocb = STAILQ_FIRST(&sdev->ocbs);
1212 sbp_orb_pointer(sdev, ocb);
1214 SBP_UNLOCK(sdev->target->sbp);
1219 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb)
1221 struct fw_xfer *xfer;
1224 device_printf(sdev->target->sbp->fd.dev,
1226 __func__, sdev->bustgtlun,
1227 (uint32_t)ocb->bus_addr);
1230 mtx_assert(&sdev->target->sbp->mtx, MA_OWNED);
1232 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) {
1234 printf("%s: orb pointer active\n", __func__);
1236 sdev->flags |= ORB_POINTER_NEED;
1240 sdev->flags |= ORB_POINTER_ACTIVE;
1241 xfer = sbp_write_cmd_locked(sdev, FWTCODE_WREQB, 0x08);
1244 xfer->hand = sbp_orb_pointer_callback;
1246 fp = &xfer->send.hdr;
1247 fp->mode.wreqb.len = 8;
1248 fp->mode.wreqb.extcode = 0;
1249 xfer->send.payload[0] =
1250 htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS )<< 16));
1251 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
1254 * sbp_xfer_free() will attempt to acquire
1255 * the SBP lock on entrance. Also, this removes
1256 * a LOR between the firewire layer and sbp
1258 SBP_UNLOCK(sdev->target->sbp);
1259 if(fw_asyreq(xfer->fc, -1, xfer) != 0){
1260 sbp_xfer_free(xfer);
1261 ocb->ccb->ccb_h.status = CAM_REQ_INVALID;
1264 SBP_LOCK(sdev->target->sbp);
1268 sbp_doorbell_callback(struct fw_xfer *xfer)
1270 struct sbp_dev *sdev;
1271 sdev = (struct sbp_dev *)xfer->sc;
1274 device_printf(sdev->target->sbp->fd.dev,
1275 "%s:%s\n", __func__, sdev->bustgtlun);
1277 if (xfer->resp != 0) {
1279 device_printf(sdev->target->sbp->fd.dev,
1280 "%s: xfer->resp = %d\n", __func__, xfer->resp);
1282 sbp_xfer_free(xfer);
1283 sdev->flags &= ~ORB_DOORBELL_ACTIVE;
1284 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
1285 sdev->flags &= ~ORB_DOORBELL_NEED;
1286 SBP_LOCK(sdev->target->sbp);
1288 SBP_UNLOCK(sdev->target->sbp);
1294 sbp_doorbell(struct sbp_dev *sdev)
1296 struct fw_xfer *xfer;
1299 device_printf(sdev->target->sbp->fd.dev,
1300 "%s:%s\n", __func__, sdev->bustgtlun);
1303 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
1304 sdev->flags |= ORB_DOORBELL_NEED;
1307 sdev->flags |= ORB_DOORBELL_ACTIVE;
1308 xfer = sbp_write_cmd_locked(sdev, FWTCODE_WREQQ, 0x10);
1311 xfer->hand = sbp_doorbell_callback;
1312 fp = &xfer->send.hdr;
1313 fp->mode.wreqq.data = htonl(0xf);
1314 fw_asyreq(xfer->fc, -1, xfer);
1317 static struct fw_xfer *
1318 sbp_write_cmd_locked(struct sbp_dev *sdev, int tcode, int offset)
1320 struct fw_xfer *xfer;
1322 struct sbp_target *target;
1325 mtx_assert(&sdev->target->sbp->mtx, MA_OWNED);
1327 target = sdev->target;
1329 xfer = STAILQ_FIRST(&target->xferlist);
1331 if (target->n_xfer > 5 /* XXX */) {
1332 printf("sbp: no more xfer for this target\n");
1336 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
1338 printf("sbp: fw_xfer_alloc_buf failed\n");
1344 printf("sbp: alloc %d xfer\n", target->n_xfer);
1347 STAILQ_REMOVE_HEAD(&target->xferlist, link);
1352 xfer->recv.pay_len = 0;
1353 xfer->send.spd = min(sdev->target->fwdev->speed, max_speed);
1354 xfer->fc = sdev->target->sbp->fd.fc;
1357 if (tcode == FWTCODE_WREQB)
1358 xfer->send.pay_len = 8;
1360 xfer->send.pay_len = 0;
1362 xfer->sc = (caddr_t)sdev;
1363 fp = &xfer->send.hdr;
1364 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
1365 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
1366 fp->mode.wreqq.tlrt = 0;
1367 fp->mode.wreqq.tcode = tcode;
1368 fp->mode.wreqq.pri = 0;
1369 fp->mode.wreqq.dst = FWLOCALBUS | sdev->target->fwdev->dst;
1375 static struct fw_xfer *
1376 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
1378 struct sbp_softc *sbp = sdev->target->sbp;
1379 struct fw_xfer *xfer;
1382 xfer = sbp_write_cmd_locked(sdev, tcode, offset);
/*
 * sbp_mgm_orb: build and submit a management ORB (login, logout, reconnect,
 * reset, abort, ...) for the given device. ORB_FUN_RUNQUEUE is special: it
 * only kicks the next queued management request, if any. Only one management
 * ORB may be outstanding per target (mgm_ocb_cur); additional requests are
 * queued on mgm_ocb_queue. A 5-second timeout is armed via mgm_ocb_timeout.
 * NOTE(review): many interior lines (returns, else branches, function-
 * specific switch cases) are elided in this view.
 */
1389 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
1391 struct fw_xfer *xfer;
1393 struct sbp_ocb *ocb;
1394 struct sbp_target *target;
1397 target = sdev->target;
1398 nid = target->sbp->fd.fc->nodeid | FWLOCALBUS;
1401 SBP_LOCK(target->sbp);
/* RUNQUEUE: dispatch the next queued management ORB, no new allocation. */
1402 if (func == ORB_FUN_RUNQUEUE) {
1403 ocb = STAILQ_FIRST(&target->mgm_ocb_queue);
1404 if (target->mgm_ocb_cur != NULL || ocb == NULL) {
1405 SBP_UNLOCK(target->sbp);
1409 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb);
1410 SBP_UNLOCK(target->sbp);
1413 if ((ocb = sbp_get_ocb(sdev)) == NULL) {
1414 SBP_UNLOCK(target->sbp);
1419 SBP_UNLOCK(target->sbp);
1420 ocb->flags = OCB_ACT_MGM;
/* Status FIFO address (orb[6..7]) points back at our bind range. */
1423 bzero((void *)ocb->orb, sizeof(ocb->orb));
1424 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI);
1425 ocb->orb[7] = htonl(SBP_DEV2ADDR(target->target_id, sdev->lun_id));
1428 device_printf(sdev->target->sbp->fd.dev,
1430 __func__,sdev->bustgtlun,
1431 orb_fun_name[(func>>16)&0xf]);
/* LOGIN ORB layout: password, login-response buffer, lun, response size. */
1435 ocb->orb[0] = ocb->orb[1] = 0; /* password */
1436 ocb->orb[2] = htonl(nid << 16);
1437 ocb->orb[3] = htonl(sdev->dma.bus_addr);
1438 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id);
1440 ocb->orb[4] |= htonl(ORB_EXV);
1441 ocb->orb[5] = htonl(SBP_LOGIN_SIZE);
1442 fwdma_sync(&sdev->dma, BUS_DMASYNC_PREREAD);
/* Abort-task case: orb[1] carries the address of the ORB to abort. */
1445 ocb->orb[0] = htonl((0 << 16) | 0);
1446 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff);
1453 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id);
1457 if (target->mgm_ocb_cur != NULL) {
1458 /* there is a standing ORB */
1459 SBP_LOCK(target->sbp);
1460 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb);
1461 SBP_UNLOCK(target->sbp);
1466 target->mgm_ocb_cur = ocb;
/* Arm the management request timeout before writing the MANAGEMENT_AGENT
 * register; sbp_mgm_timeout() fires if no status arrives in 5 seconds. */
1469 callout_reset(&target->mgm_ocb_timeout, 5*hz,
1470 sbp_mgm_timeout, (caddr_t)ocb);
1471 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
1475 xfer->hand = sbp_mgm_callback;
1477 fp = &xfer->send.hdr;
1478 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi;
1479 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo;
1480 fp->mode.wreqb.len = 8;
1481 fp->mode.wreqb.extcode = 0;
/* Payload is the 64-bit ORB pointer written to the management agent. */
1482 xfer->send.payload[0] = htonl(nid << 16);
1483 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff);
1485 fw_asyreq(xfer->fc, -1, xfer);
/*
 * sbp_print_scsi_cmd: debug helper — dump the first 10 CDB bytes, direction
 * flags and cmd/data/sense lengths of the CCB attached to an OCB.
 */
1489 sbp_print_scsi_cmd(struct sbp_ocb *ocb)
1491 struct ccb_scsiio *csio;
1493 csio = &ocb->ccb->csio;
1494 printf("%s:%d:%d XPT_SCSI_IO: "
1495 "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x"
1497 "%db cmd/%db data/%db sense\n",
1498 device_get_nameunit(ocb->sdev->target->sbp->fd.dev),
1499 ocb->ccb->ccb_h.target_id, ocb->ccb->ccb_h.target_lun,
1500 csio->cdb_io.cdb_bytes[0],
1501 csio->cdb_io.cdb_bytes[1],
1502 csio->cdb_io.cdb_bytes[2],
1503 csio->cdb_io.cdb_bytes[3],
1504 csio->cdb_io.cdb_bytes[4],
1505 csio->cdb_io.cdb_bytes[5],
1506 csio->cdb_io.cdb_bytes[6],
1507 csio->cdb_io.cdb_bytes[7],
1508 csio->cdb_io.cdb_bytes[8],
1509 csio->cdb_io.cdb_bytes[9],
1510 ocb->ccb->ccb_h.flags & CAM_DIR_MASK,
1511 csio->cdb_len, csio->dxfer_len,
/*
 * sbp_scsi_status: translate an SBP-2 command status block into CAM
 * fixed-format SCSI sense data in the CCB, and mark the CCB with
 * CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID. Unknown status values are
 * logged only.
 */
1516 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb)
1518 struct sbp_cmd_status *sbp_cmd_status;
1519 struct scsi_sense_data_fixed *sense;
1521 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data;
1522 sense = (struct scsi_sense_data_fixed *)&ocb->ccb->csio.sense_data;
1525 sbp_print_scsi_cmd(ocb);
1526 /* XXX need decode status */
1527 printf("%s: SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n",
1528 ocb->sdev->bustgtlun,
1529 sbp_cmd_status->status,
1530 sbp_cmd_status->sfmt,
1531 sbp_cmd_status->valid,
1532 sbp_cmd_status->s_key,
1533 sbp_cmd_status->s_code,
1534 sbp_cmd_status->s_qlfr,
1538 switch (sbp_cmd_status->status) {
1539 case SCSI_STATUS_CHECK_COND:
1540 case SCSI_STATUS_BUSY:
1541 case SCSI_STATUS_CMD_TERMINATED:
/* Map SBP sense format to SCSI current/deferred error codes. */
1542 if(sbp_cmd_status->sfmt == SBP_SFMT_CURR){
1543 sense->error_code = SSD_CURRENT_ERROR;
1545 sense->error_code = SSD_DEFERRED_ERROR;
1547 if(sbp_cmd_status->valid)
1548 sense->error_code |= SSD_ERRCODE_VALID;
1549 sense->flags = sbp_cmd_status->s_key;
1550 if(sbp_cmd_status->mark)
1551 sense->flags |= SSD_FILEMARK;
1552 if(sbp_cmd_status->eom)
1553 sense->flags |= SSD_EOM;
1554 if(sbp_cmd_status->ill_len)
1555 sense->flags |= SSD_ILI;
1557 bcopy(&sbp_cmd_status->info, &sense->info[0], 4);
/* extra_len depends on how much of the status block the target filled. */
1559 if (sbp_status->len <= 1)
1560 /* XXX not scsi status; shouldn't happen */
1561 sense->extra_len = 0;
1562 else if (sbp_status->len <= 4)
1563 /* add_sense_code(_qual), info, cmd_spec_info */
1564 sense->extra_len = 6;
1566 /* fru, sense_key_spec */
1567 sense->extra_len = 10;
1569 bcopy(&sbp_cmd_status->cdb, &sense->cmd_spec_info[0], 4);
1571 sense->add_sense_code = sbp_cmd_status->s_code;
1572 sense->add_sense_code_qual = sbp_cmd_status->s_qlfr;
1573 sense->fru = sbp_cmd_status->fru;
1575 bcopy(&sbp_cmd_status->s_keydep[0],
1576 &sense->sense_key_spec[0], 3);
1578 ocb->ccb->csio.scsi_status = sbp_cmd_status->status;
1579 ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
1580 | CAM_AUTOSNS_VALID;
/* Debug hexdump of the raw 32-byte sense buffer. */
1585 for( j = 0 ; j < 32 ; j+=8){
1586 printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
1587 tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
1588 tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
1595 device_printf(ocb->sdev->target->sbp->fd.dev,
1596 "%s:%s unknown scsi status 0x%x\n",
1597 __func__, ocb->sdev->bustgtlun,
1598 sbp_cmd_status->status);
/*
 * sbp_fix_inq_data: post-process INQUIRY response data — convert T_DIRECT
 * devices to T_RBC, override vendor/product/revision strings with the values
 * read from the config ROM, and force tagged queuing on/off according to the
 * sbp_tags tunable. EVPD (vital product data) inquiries are left untouched.
 */
1603 sbp_fix_inq_data(struct sbp_ocb *ocb)
1606 struct sbp_dev *sdev;
1607 struct scsi_inquiry_data *inq;
1612 if (ccb->csio.cdb_io.cdb_bytes[1] & SI_EVPD)
1615 device_printf(sdev->target->sbp->fd.dev,
1616 "%s:%s\n", __func__, sdev->bustgtlun);
1618 inq = (struct scsi_inquiry_data *) ccb->csio.data_ptr;
1619 switch (SID_TYPE(inq)) {
1623 * XXX Convert Direct Access device to RBC.
1624 * I've never seen FireWire DA devices which support READ_6.
1626 if (SID_TYPE(inq) == T_DIRECT)
1627 inq->device |= T_RBC; /* T_DIRECT == 0 */
1632 * Override vendor/product/revision information.
1633 * Some devices sometimes return strange strings.
/* revision+2 skips a presumed 2-char prefix in the ROM string —
 * TODO(review) confirm against sbp_dev layout. */
1636 bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor));
1637 bcopy(sdev->product, inq->product, sizeof(inq->product));
1638 bcopy(sdev->revision+2, inq->revision, sizeof(inq->revision));
1643 * Force to enable/disable tagged queuing.
1644 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
1647 inq->flags |= SID_CmdQue;
1648 else if (sbp_tags < 0)
1649 inq->flags &= ~SID_CmdQue;
/*
 * sbp_recv1: handle an incoming status-FIFO write from a target. Decodes
 * the target/lun from the destination address, matches the status block to
 * an outstanding OCB (management or command), completes the CCB or advances
 * the login/reconnect state machine, resets the fetch agent when the target
 * reports it dead, then sends the write response and recycles the xfer onto
 * the bind's xferlist.
 * NOTE(review): many interior lines (gotos, else branches, case labels) are
 * elided in this view.
 */
1654 sbp_recv1(struct fw_xfer *xfer)
1660 struct sbp_softc *sbp;
1661 struct sbp_dev *sdev;
1662 struct sbp_ocb *ocb;
1663 struct sbp_login_res *login_res = NULL;
1664 struct sbp_status *sbp_status;
1665 struct sbp_target *target;
1666 int orb_fun, status_valid0, status_valid, t, l, reset_agent = 0;
1670 ld = xfer->recv.buf;
1671 printf("sbp %x %d %d %08x %08x %08x %08x\n",
1672 xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
1673 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
1674 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
1676 sbp = (struct sbp_softc *)xfer->sc;
1677 if (xfer->resp != 0){
1678 printf("sbp_recv: xfer->resp = %d\n", xfer->resp);
1681 if (xfer->recv.payload == NULL){
1682 printf("sbp_recv: xfer->recv.payload == NULL\n");
/* Status writes must arrive as block write requests. */
1685 rfp = &xfer->recv.hdr;
1686 if(rfp->mode.wreqb.tcode != FWTCODE_WREQB){
1687 printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
1690 sbp_status = (struct sbp_status *)xfer->recv.payload;
1691 addr = rfp->mode.wreqb.dest_lo;
1693 printf("received address 0x%x\n", addr);
/* The low address bits encode which target/lun this status is for. */
1695 t = SBP_ADDR2TRG(addr);
1696 if (t >= SBP_NUM_TARGETS) {
1697 device_printf(sbp->fd.dev,
1698 "sbp_recv1: invalid target %d\n", t);
1701 target = &sbp->targets[t];
1702 l = SBP_ADDR2LUN(addr);
1703 if (l >= target->num_lun || target->luns[l] == NULL) {
1704 device_printf(sbp->fd.dev,
1705 "sbp_recv1: invalid lun %d (target=%d)\n", l, t);
1708 sdev = target->luns[l];
1711 switch (sbp_status->src) {
1714 /* check mgm_ocb_cur first */
1715 ocb = target->mgm_ocb_cur;
1717 if (OCB_MATCH(ocb, sbp_status)) {
1718 callout_stop(&target->mgm_ocb_timeout);
1719 target->mgm_ocb_cur = NULL;
1723 ocb = sbp_dequeue_ocb(sdev, sbp_status);
1725 device_printf(sdev->target->sbp->fd.dev,
1726 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1727 "%s:%s No ocb(%lx) on the queue\n",
1729 "%s:%s No ocb(%x) on the queue\n",
1731 __func__,sdev->bustgtlun,
1732 ntohl(sbp_status->orb_lo));
1737 device_printf(sdev->target->sbp->fd.dev,
1738 "%s:%s unsolicit status received\n",
1739 __func__, sdev->bustgtlun);
1742 device_printf(sdev->target->sbp->fd.dev,
1743 "%s:%s unknown sbp_status->src\n",
1744 __func__, sdev->bustgtlun);
/* status_valid0: status block itself plausible; status_valid adds
 * "command completed without error". */
1747 status_valid0 = (sbp_status->src < 2
1748 && sbp_status->resp == ORB_RES_CMPL
1749 && sbp_status->dead == 0);
1750 status_valid = (status_valid0 && sbp_status->status == 0);
1752 if (!status_valid0 || debug > 2){
1755 device_printf(sdev->target->sbp->fd.dev,
1756 "%s:%s ORB status src:%x resp:%x dead:%x"
1757 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1758 " len:%x stat:%x orb:%x%08lx\n",
1760 " len:%x stat:%x orb:%x%08x\n",
1762 __func__, sdev->bustgtlun,
1763 sbp_status->src, sbp_status->resp, sbp_status->dead,
1764 sbp_status->len, sbp_status->status,
1765 ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
1767 device_printf(sdev->target->sbp->fd.dev,
1768 "%s\n", sdev->bustgtlun);
1769 status = sbp_status->status;
1770 switch(sbp_status->resp) {
1772 if (status > MAX_ORB_STATUS0)
1773 printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
1775 printf("%s\n", orb_status0[status]);
1778 printf("Obj: %s, Error: %s\n",
1779 orb_status1_object[(status>>6) & 3],
1780 orb_status1_serial_bus_error[status & 0xf]);
1783 printf("Illegal request\n");
1786 printf("Vendor dependent\n");
1789 printf("unknown respose code %d\n", sbp_status->resp);
1793 /* we have to reset the fetch agent if it's dead */
1794 if (sbp_status->dead) {
1797 xpt_freeze_devq(sdev->path, 1);
1807 switch(ntohl(ocb->orb[4]) & ORB_FMT_MSK){
1813 switch(ocb->flags) {
1815 orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
/* Login response: byte-swap the fields the target wrote into the
 * per-device DMA buffer before using them. */
1819 fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD);
1820 login_res = sdev->login;
1821 login_res->len = ntohs(login_res->len);
1822 login_res->id = ntohs(login_res->id);
1823 login_res->cmd_hi = ntohs(login_res->cmd_hi);
1824 login_res->cmd_lo = ntohl(login_res->cmd_lo);
1827 device_printf(sdev->target->sbp->fd.dev,
1828 "%s:%s login: len %d, ID %d, cmd %08x%08x, recon_hold %d\n",
1829 __func__, sdev->bustgtlun,
1830 login_res->len, login_res->id,
1831 login_res->cmd_hi, login_res->cmd_lo,
1832 ntohs(login_res->recon_hold));
1834 sbp_busy_timeout(sdev);
1836 /* forgot logout? */
1837 device_printf(sdev->target->sbp->fd.dev,
1838 "%s:%s login failed\n",
1839 __func__, sdev->bustgtlun);
1840 sdev->status = SBP_DEV_RESET;
1844 login_res = sdev->login;
1847 device_printf(sdev->target->sbp->fd.dev,
1848 "%s:%s reconnect: len %d, ID %d, cmd %08x%08x\n",
1849 __func__, sdev->bustgtlun,
1850 login_res->len, login_res->id,
1851 login_res->cmd_hi, login_res->cmd_lo,
1853 if (sdev->status == SBP_DEV_ATTACHED)
1856 sbp_agent_reset(sdev);
1858 /* reconnection hold time exceed? */
1860 device_printf(sdev->target->sbp->fd.dev,
1861 "%s:%s reconnect failed\n",
1862 __func__, sdev->bustgtlun);
1868 sdev->status = SBP_DEV_RESET;
1871 sbp_busy_timeout(sdev);
1876 sbp_agent_reset(sdev);
1879 device_printf(sdev->target->sbp->fd.dev,
1880 "%s:%s unknown function %d\n",
1881 __func__, sdev->bustgtlun, orb_fun);
/* Kick the next queued management request, if any. */
1884 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
1888 if(ocb->ccb != NULL){
1892 if(sbp_status->len > 1){
1893 sbp_scsi_status(sbp_status, ocb);
1895 if(sbp_status->resp != ORB_RES_CMPL){
1896 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1898 ccb->ccb_h.status = CAM_REQ_CMP;
1901 /* fix up inq data */
1902 if (ccb->csio.cdb_io.cdb_bytes[0] == INQUIRY)
1903 sbp_fix_inq_data(ocb);
1915 sbp_free_ocb(sdev, ocb);
1918 sbp_agent_reset(sdev);
1921 xfer->recv.pay_len = SBP_RECV_LEN;
1922 /* The received packet is usually small enough to be stored within
1923 * the buffer. In that case, the controller returns ack_complete and
1924 * no response is necessary.
1926 * XXX fwohci.c and firewire.c should inform event_code such as
1927 * ack_complete or ack_pending to upper driver.
/* Send the split-transaction write response back to the target. */
1931 sfp = (struct fw_pkt *)xfer->send.buf;
1932 sfp->mode.wres.dst = rfp->mode.wreqb.src;
1933 xfer->dst = sfp->mode.wres.dst;
1934 xfer->spd = min(sdev->target->fwdev->speed, max_speed);
1935 xfer->hand = sbp_loginres_callback;
1937 sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
1938 sfp->mode.wres.tcode = FWTCODE_WRES;
1939 sfp->mode.wres.rtcode = 0;
1940 sfp->mode.wres.pri = 0;
1942 fw_asyreq(xfer->fc, -1, xfer);
1945 /* we don't need a lock here because bottom half is serialized */
1946 STAILQ_INSERT_TAIL(&sbp->fwb.xferlist, xfer, link);
1954 sbp_recv(struct fw_xfer *xfer)
/*
 * sbp_attach: device attach — create the busdma tag for command data,
 * allocate the CAM devq/SIM, register the bus and wildcard path, bind the
 * 16-bit status-FIFO address window, pre-allocate receive xfers, hook the
 * bus-reset/explore callbacks, and kick an initial scan if the bus is
 * already up. Error-unwind paths are elided in this view.
 */
1966 sbp_attach(device_t dev)
1968 struct sbp_softc *sbp;
1969 struct cam_devq *devq;
1970 struct firewire_comm *fc;
1973 if (DFLTPHYS > SBP_MAXPHYS)
1974 device_printf(dev, "Warning, DFLTPHYS(%dKB) is larger than "
1975 "SBP_MAXPHYS(%dKB).\n", DFLTPHYS / 1024,
1976 SBP_MAXPHYS / 1024);
1978 if (!firewire_phydma_enable)
1979 device_printf(dev, "Warning, hw.firewire.phydma_enable must be 1 "
1980 "for SBP over FireWire.\n");
1982 printf("sbp_attach (cold=%d)\n", cold);
1987 sbp = ((struct sbp_softc *)device_get_softc(dev));
1988 bzero(sbp, sizeof(struct sbp_softc));
1990 sbp->fd.fc = fc = device_get_ivars(dev);
1991 mtx_init(&sbp->mtx, "sbp", NULL, MTX_DEF);
1994 max_speed = fc->speed;
1996 error = bus_dma_tag_create(/*parent*/fc->dmat,
1997 /* XXX should be 4 for sane backend? */
2000 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2001 /*highaddr*/BUS_SPACE_MAXADDR,
2002 /*filter*/NULL, /*filterarg*/NULL,
2003 /*maxsize*/0x100000, /*nsegments*/SBP_IND_MAX,
2004 /*maxsegsz*/SBP_SEG_MAX,
2005 /*flags*/BUS_DMA_ALLOCNOW,
2006 #if defined(__FreeBSD__) && __FreeBSD_version >= 501102
2007 /*lockfunc*/busdma_lock_mutex,
2008 /*lockarg*/&sbp->mtx,
2012 printf("sbp_attach: Could not allocate DMA tag "
2013 "- error %d\n", error);
2017 devq = cam_simq_alloc(/*maxopenings*/SBP_NUM_OCB);
2021 for( i = 0 ; i < SBP_NUM_TARGETS ; i++){
2022 sbp->targets[i].fwdev = NULL;
2023 sbp->targets[i].luns = NULL;
2026 sbp->sim = cam_sim_alloc(sbp_action, sbp_poll, "sbp", sbp,
2027 device_get_unit(dev),
2030 /*tagged*/ SBP_QUEUE_LEN - 1,
2033 if (sbp->sim == NULL) {
2034 cam_simq_free(devq);
2039 if (xpt_bus_register(sbp->sim, dev, /*bus*/0) != CAM_SUCCESS)
2042 if (xpt_create_path(&sbp->path, NULL, cam_sim_path(sbp->sim),
2043 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2044 xpt_bus_deregister(cam_sim_path(sbp->sim));
2049 /* We reserve 16 bit space (4 bytes X 64 targets X 256 luns) */
2050 sbp->fwb.start = ((u_int64_t)SBP_BIND_HI << 32) | SBP_DEV2ADDR(0, 0);
2051 sbp->fwb.end = sbp->fwb.start + 0xffff;
2052 /* pre-allocate xfer */
2053 STAILQ_INIT(&sbp->fwb.xferlist);
2054 fw_xferlist_add(&sbp->fwb.xferlist, M_SBP,
2055 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB/2,
2056 fc, (void *)sbp, sbp_recv);
2058 fw_bindadd(fc, &sbp->fwb);
2060 sbp->fd.post_busreset = sbp_post_busreset;
2061 sbp->fd.post_explore = sbp_post_explore;
/* If the bus already finished its first reset, scan immediately. */
2063 if (fc->status != -1) {
2065 sbp_post_busreset((void *)sbp);
2066 sbp_post_explore((void *)sbp);
2070 xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL);
2076 cam_sim_free(sbp->sim, /*free_devq*/TRUE);
/*
 * sbp_logout_all: issue an SBP-2 LOGOUT management ORB to every logged-in
 * lun on every target; also cancels any pending login retry callouts.
 */
2081 sbp_logout_all(struct sbp_softc *sbp)
2083 struct sbp_target *target;
2084 struct sbp_dev *sdev;
2088 printf("sbp_logout_all\n");
2090 for (i = 0 ; i < SBP_NUM_TARGETS ; i ++) {
2091 target = &sbp->targets[i];
2092 if (target->luns == NULL)
2094 for (j = 0; j < target->num_lun; j++) {
2095 sdev = target->luns[j];
2098 callout_stop(&sdev->login_callout);
2099 if (sdev->status >= SBP_DEV_TOATTACH &&
2100 sdev->status <= SBP_DEV_ATTACHED)
2101 sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
/* sbp_shutdown: device_shutdown method — log out of all targets. */
2109 sbp_shutdown(device_t dev)
2111 struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));
2113 sbp_logout_all(sbp);
/*
 * sbp_free_sdev: release per-lun resources — the DMA maps of every OCB and
 * the per-device login/ORB DMA buffer. (NULL check / final free elided.)
 */
2118 sbp_free_sdev(struct sbp_dev *sdev)
2124 for (i = 0; i < SBP_QUEUE_LEN; i++)
2125 bus_dmamap_destroy(sdev->target->sbp->dmat,
2126 sdev->ocb[i].dmamap);
2127 fwdma_free(sdev->target->sbp->fd.fc, &sdev->dma);
/*
 * sbp_free_target: tear down one target — stop the management timeout,
 * free every lun's sdev, free all pre-allocated write xfers, then clear
 * the lun table and fwdev binding so the slot can be reused.
 */
2133 sbp_free_target(struct sbp_target *target)
2135 struct sbp_softc *sbp;
2136 struct fw_xfer *xfer, *next;
2139 if (target->luns == NULL)
2141 callout_stop(&target->mgm_ocb_timeout);
2143 for (i = 0; i < target->num_lun; i++)
2144 sbp_free_sdev(target->luns[i]);
/* Save the next pointer before freeing the current xfer. */
2146 for (xfer = STAILQ_FIRST(&target->xferlist);
2147 xfer != NULL; xfer = next) {
2148 next = STAILQ_NEXT(xfer, link);
2149 fw_xfer_free_buf(xfer);
2151 STAILQ_INIT(&target->xferlist);
2152 free(target->luns, M_SBP);
2153 target->num_lun = 0;
2154 target->luns = NULL;
2155 target->fwdev = NULL;
/*
 * sbp_detach: device_detach method — detach all CAM targets, tear down the
 * SIM/path, log out of every device (with a short grace pause for logout
 * completion), free per-target state, remove the address-range bind and its
 * xfers, and destroy the DMA tag and mutex.
 */
2159 sbp_detach(device_t dev)
2161 struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));
2162 struct firewire_comm *fc = sbp->fd.fc;
2166 printf("sbp_detach\n");
2169 for (i = 0; i < SBP_NUM_TARGETS; i ++)
2170 sbp_cam_detach_target(&sbp->targets[i]);
2173 xpt_async(AC_LOST_DEVICE, sbp->path, NULL);
2174 xpt_free_path(sbp->path);
2175 xpt_bus_deregister(cam_sim_path(sbp->sim));
2176 cam_sim_free(sbp->sim, /*free_devq*/ TRUE);
2179 sbp_logout_all(sbp);
2181 /* XXX wait for logout completion */
2182 pause("sbpdtc", hz/2);
2184 for (i = 0 ; i < SBP_NUM_TARGETS ; i ++)
2185 sbp_free_target(&sbp->targets[i]);
2187 fw_bindremove(fc, &sbp->fwb);
2188 fw_xferlist_remove(&sbp->fwb.xferlist);
2190 bus_dma_tag_destroy(sbp->dmat);
2191 mtx_destroy(&sbp->mtx);
/*
 * sbp_cam_detach_sdev: abort all outstanding OCBs for a lun with
 * CAM_DEV_NOT_THERE, release any devq freeze, and announce/free its CAM
 * path. Skips devices already dead or in reset.
 */
2197 sbp_cam_detach_sdev(struct sbp_dev *sdev)
2201 if (sdev->status == SBP_DEV_DEAD)
2203 if (sdev->status == SBP_DEV_RESET)
2205 sbp_abort_all_ocbs(sdev, CAM_DEV_NOT_THERE);
2207 SBP_LOCK(sdev->target->sbp);
2208 xpt_release_devq(sdev->path,
2209 sdev->freeze, TRUE);
2211 xpt_async(AC_LOST_DEVICE, sdev->path, NULL);
2212 xpt_free_path(sdev->path);
2214 SBP_UNLOCK(sdev->target->sbp);
/*
 * sbp_cam_detach_target: stop the target's rescan callout and detach every
 * configured lun from CAM.
 */
2219 sbp_cam_detach_target(struct sbp_target *target)
2223 if (target->luns != NULL) {
2225 printf("sbp_detach_target %d\n", target->target_id);
2227 callout_stop(&target->scan_callout);
2228 for (i = 0; i < target->num_lun; i++)
2229 sbp_cam_detach_sdev(target->luns[i]);
/*
 * sbp_target_reset: abort every lun's outstanding OCBs with
 * CAM_CMD_TIMEOUT and push each lun back to the LOGIN state, then either
 * send a TARGET RESET management ORB (method selects it) or restart via a
 * bus-level reset. Skips luns that are dead or already resetting.
 */
2234 sbp_target_reset(struct sbp_dev *sdev, int method)
2237 struct sbp_target *target = sdev->target;
2238 struct sbp_dev *tsdev;
2240 for (i = 0; i < target->num_lun; i++) {
2241 tsdev = target->luns[i];
2244 if (tsdev->status == SBP_DEV_DEAD)
2246 if (tsdev->status == SBP_DEV_RESET)
2248 SBP_LOCK(target->sbp);
2249 xpt_freeze_devq(tsdev->path, 1);
2251 SBP_UNLOCK(target->sbp);
2252 sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT);
2254 tsdev->status = SBP_DEV_LOGIN;
2258 printf("target reset\n");
2259 sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2262 printf("reset start\n");
2263 sbp_reset_start(sdev);
/*
 * sbp_mgm_timeout: callout fired when a management ORB got no status within
 * 5 seconds — drop the current ORB, then either run the next queued
 * management request or restart the device via sbp_reset_start().
 */
2270 sbp_mgm_timeout(void *arg)
2272 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2273 struct sbp_dev *sdev = ocb->sdev;
2274 struct sbp_target *target = sdev->target;
2276 device_printf(sdev->target->sbp->fd.dev,
2277 "%s:%s request timeout(mgm orb:0x%08x)\n",
2278 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2279 target->mgm_ocb_cur = NULL;
2280 sbp_free_ocb(sdev, ocb);
2283 printf("run next request\n");
2284 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2286 device_printf(sdev->target->sbp->fd.dev,
2287 "%s:%s reset start\n",
2288 __func__, sdev->bustgtlun);
2289 sbp_reset_start(sdev);
/*
 * sbp_timeout: per-command timeout handler with escalating recovery based
 * on sdev->timeout: first an agent reset (aborting all OCBs), then a target
 * reset, and finally full CAM detach and release of the target's lun table.
 * NOTE(review): the case labels / timeout-counter increments between the
 * escalation steps are elided in this view.
 */
2293 sbp_timeout(void *arg)
2295 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2296 struct sbp_dev *sdev = ocb->sdev;
2298 device_printf(sdev->target->sbp->fd.dev,
2299 "%s:%s request timeout(cmd orb:0x%08x) ... ",
2300 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2303 switch(sdev->timeout) {
2305 printf("agent reset\n");
2306 SBP_LOCK(sdev->target->sbp);
2307 xpt_freeze_devq(sdev->path, 1);
2309 SBP_UNLOCK(sdev->target->sbp);
2310 sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT);
2311 sbp_agent_reset(sdev);
2315 sbp_target_reset(sdev, sdev->timeout - 1);
/* Last resort: give up on the target entirely. */
2320 sbp_cam_detach_target(target);
2321 if (target->luns != NULL)
2322 free(target->luns, M_SBP);
2323 target->num_lun = 0;
2324 target->luns = NULL;
2325 target->fwdev = NULL;
/*
 * sbp_action1: CAM SIM action dispatcher — validates the target/lun
 * addressed by the CCB, then handles XPT_SCSI_IO (builds a command ORB,
 * maps the data buffer and executes it), XPT_CALC_GEOMETRY, XPT_RESET_BUS,
 * XPT_PATH_INQ and XPT_GET/SET_TRAN_SETTINGS. Unsupported opcodes are
 * completed with an error status.
 * NOTE(review): break statements, xpt_done() calls and some else branches
 * are elided in this view.
 */
2331 sbp_action1(struct cam_sim *sim, union ccb *ccb)
2334 struct sbp_softc *sbp = (struct sbp_softc *)sim->softc;
2335 struct sbp_target *target = NULL;
2336 struct sbp_dev *sdev = NULL;
2338 /* target:lun -> sdev mapping */
2340 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD
2341 && ccb->ccb_h.target_id < SBP_NUM_TARGETS) {
2342 target = &sbp->targets[ccb->ccb_h.target_id];
2343 if (target->fwdev != NULL
2344 && ccb->ccb_h.target_lun != CAM_LUN_WILDCARD
2345 && ccb->ccb_h.target_lun < target->num_lun) {
2346 sdev = target->luns[ccb->ccb_h.target_lun];
2347 if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2348 sdev->status != SBP_DEV_PROBE)
2355 printf("invalid target %d lun %d\n",
2356 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
/* First switch: opcodes that require a resolved sdev. */
2359 switch (ccb->ccb_h.func_code) {
2362 case XPT_GET_TRAN_SETTINGS:
2363 case XPT_SET_TRAN_SETTINGS:
2364 case XPT_CALC_GEOMETRY:
2367 printf("%s:%d:%d:func_code 0x%04x: "
2368 "Invalid target (target needed)\n",
2369 device_get_nameunit(sbp->fd.dev),
2370 ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2371 ccb->ccb_h.func_code);
2374 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2381 /* The opcodes sometimes aimed at a target (sc is valid),
2382 * sometimes aimed at the SIM (sc is invalid and target is
2383 * CAM_TARGET_WILDCARD)
2386 ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
2388 printf("%s:%d:%d func_code 0x%04x: "
2389 "Invalid target (no wildcard)\n",
2390 device_get_nameunit(sbp->fd.dev),
2391 ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2392 ccb->ccb_h.func_code);
2394 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2400 /* XXX Hm, we should check the input parameters */
/* Second switch: the actual per-opcode handling. */
2404 switch (ccb->ccb_h.func_code) {
2407 struct ccb_scsiio *csio;
2408 struct sbp_ocb *ocb;
2413 mtx_assert(sim->mtx, MA_OWNED);
2416 printf("%s:%d:%d XPT_SCSI_IO: "
2417 "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x"
2419 "%db cmd/%db data/%db sense\n",
2420 device_get_nameunit(sbp->fd.dev),
2421 ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2422 csio->cdb_io.cdb_bytes[0],
2423 csio->cdb_io.cdb_bytes[1],
2424 csio->cdb_io.cdb_bytes[2],
2425 csio->cdb_io.cdb_bytes[3],
2426 csio->cdb_io.cdb_bytes[4],
2427 csio->cdb_io.cdb_bytes[5],
2428 csio->cdb_io.cdb_bytes[6],
2429 csio->cdb_io.cdb_bytes[7],
2430 csio->cdb_io.cdb_bytes[8],
2431 csio->cdb_io.cdb_bytes[9],
2432 ccb->ccb_h.flags & CAM_DIR_MASK,
2433 csio->cdb_len, csio->dxfer_len,
2437 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2442 /* if we are in probe stage, pass only probe commands */
2443 if (sdev->status == SBP_DEV_PROBE) {
2445 name = xpt_path_periph(ccb->ccb_h.path)->periph_name;
2446 printf("probe stage, periph name: %s\n", name);
2447 if (strcmp(name, "probe") != 0) {
2448 ccb->ccb_h.status = CAM_REQUEUE_REQ;
/* Out of OCBs: freeze the devq so CAM stops sending commands
 * until sbp_free_ocb() releases the freeze. */
2454 if ((ocb = sbp_get_ocb(sdev)) == NULL) {
2455 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2456 if (sdev->freeze == 0) {
2457 SBP_LOCK(sdev->target->sbp);
2458 xpt_freeze_devq(sdev->path, 1);
2460 SBP_UNLOCK(sdev->target->sbp);
/* Build the command ORB; orb[3] points at the indirect page table. */
2466 ocb->flags = OCB_ACT_CMD;
2469 ccb->ccb_h.ccb_sdev_ptr = sdev;
2470 ocb->orb[0] = htonl(1 << 31);
2472 ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS )<< 16) );
2473 ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2474 speed = min(target->fwdev->speed, max_speed);
2475 ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed)
2476 | ORB_CMD_MAXP(speed + 7));
2477 if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN){
2478 ocb->orb[4] |= htonl(ORB_CMD_IN);
2481 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2482 cdb = (void *)csio->cdb_io.cdb_ptr;
2484 cdb = (void *)&csio->cdb_io.cdb_bytes;
2485 bcopy(cdb, (void *)&ocb->orb[5], csio->cdb_len);
2487 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2488 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2490 if (ccb->csio.dxfer_len > 0) {
2493 error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat,
2494 /*dma map*/ocb->dmamap,
2500 printf("sbp: bus_dmamap_load error %d\n", error);
2502 sbp_execute_ocb(ocb, NULL, 0, 0);
2505 case XPT_CALC_GEOMETRY:
2507 struct ccb_calc_geometry *ccg;
2508 #if defined(__DragonFly__) || __FreeBSD_version < 501100
2510 uint32_t secs_per_cylinder;
2515 if (ccg->block_size == 0) {
2516 printf("sbp_action1: block_size is 0.\n");
2517 ccb->ccb_h.status = CAM_REQ_INVALID;
2522 printf("%s:%d:%d:%d:XPT_CALC_GEOMETRY: "
2523 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2524 "Volume size = %d\n",
2526 "Volume size = %jd\n",
2528 device_get_nameunit(sbp->fd.dev),
2529 cam_sim_path(sbp->sim),
2530 ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2531 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
/* Legacy manual geometry computation for old OS versions; modern
 * FreeBSD uses cam_calc_geometry() below. */
2537 #if defined(__DragonFly__) || __FreeBSD_version < 501100
2538 size_mb = ccg->volume_size
2539 / ((1024L * 1024L) / ccg->block_size);
2541 if (size_mb > 1024 && extended) {
2543 ccg->secs_per_track = 63;
2546 ccg->secs_per_track = 32;
2548 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2549 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2550 ccb->ccb_h.status = CAM_REQ_CMP;
2552 cam_calc_geometry(ccg, /*extended*/1);
2557 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2561 printf("%s:%d:XPT_RESET_BUS: \n",
2562 device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim));
2565 ccb->ccb_h.status = CAM_REQ_INVALID;
2569 case XPT_PATH_INQ: /* Path routing inquiry */
2571 struct ccb_pathinq *cpi = &ccb->cpi;
2574 printf("%s:%d:%d XPT_PATH_INQ:.\n",
2575 device_get_nameunit(sbp->fd.dev),
2576 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2578 cpi->version_num = 1; /* XXX??? */
2579 cpi->hba_inquiry = PI_TAG_ABLE;
2580 cpi->target_sprt = 0;
2581 cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE;
2582 cpi->hba_eng_cnt = 0;
2583 cpi->max_target = SBP_NUM_TARGETS - 1;
2584 cpi->max_lun = SBP_NUM_LUNS - 1;
2585 cpi->initiator_id = SBP_INITIATOR;
2586 cpi->bus_id = sim->bus_id;
2587 cpi->base_transfer_speed = 400 * 1000 / 8;
2588 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2589 strncpy(cpi->hba_vid, "SBP", HBA_IDLEN);
2590 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
2591 cpi->unit_number = sim->unit_number;
2592 cpi->transport = XPORT_SPI; /* XX should have a FireWire */
2593 cpi->transport_version = 2;
2594 cpi->protocol = PROTO_SCSI;
2595 cpi->protocol_version = SCSI_REV_2;
2597 cpi->ccb_h.status = CAM_REQ_CMP;
2601 case XPT_GET_TRAN_SETTINGS:
2603 struct ccb_trans_settings *cts = &ccb->cts;
2604 struct ccb_trans_settings_scsi *scsi =
2605 &cts->proto_specific.scsi;
2606 struct ccb_trans_settings_spi *spi =
2607 &cts->xport_specific.spi;
2609 cts->protocol = PROTO_SCSI;
2610 cts->protocol_version = SCSI_REV_2;
2611 cts->transport = XPORT_SPI; /* should have a FireWire */
2612 cts->transport_version = 2;
2613 spi->valid = CTS_SPI_VALID_DISC;
2614 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2615 scsi->valid = CTS_SCSI_VALID_TQ;
2616 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2618 printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:.\n",
2619 device_get_nameunit(sbp->fd.dev),
2620 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2622 cts->ccb_h.status = CAM_REQ_CMP;
2627 ccb->ccb_h.status = CAM_UA_ABORT;
2630 case XPT_SET_TRAN_SETTINGS:
2633 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * sbp_action: SIM action entry point registered with cam_sim_alloc();
 * delegates to sbp_action1(). (Surrounding lines elided in this view.)
 */
2641 sbp_action(struct cam_sim *sim, union ccb *ccb)
2646 sbp_action1(sim, ccb);
/*
 * sbp_execute_ocb: busdma load callback — finish building the command ORB
 * from the DMA segment list (direct pointer for one segment, indirect page
 * table for several), sync the buffers, enqueue the OCB, and notify the
 * target either by doorbell or by writing the ORB pointer register.
 */
2651 sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error)
2654 struct sbp_ocb *ocb;
2655 struct sbp_ocb *prev;
2656 bus_dma_segment_t *s;
2659 printf("sbp_execute_ocb: error=%d\n", error);
2661 ocb = (struct sbp_ocb *)arg;
2664 printf("sbp_execute_ocb: seg %d", seg);
2665 for (i = 0; i < seg; i++)
2666 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2667 printf(", %x:%d", segments[i].ds_addr, segments[i].ds_len);
2669 printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
2670 (uintmax_t)segments[i].ds_len);
2676 /* direct pointer */
2678 if (s->ds_len > SBP_SEG_MAX)
2679 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2680 ocb->orb[3] = htonl(s->ds_addr);
2681 ocb->orb[4] |= htonl(s->ds_len);
2682 } else if(seg > 1) {
/* Multiple segments: fill the indirect page table (ind_ptr). */
2684 for (i = 0; i < seg; i++) {
2687 /* XXX LSI Logic "< 16 byte" bug might be hit */
2689 printf("sbp_execute_ocb: warning, "
2690 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2691 "segment length(%d) is less than 16."
2693 "segment length(%zd) is less than 16."
2695 "(seg=%d/%d)\n", (size_t)s->ds_len, i+1, seg);
2697 if (s->ds_len > SBP_SEG_MAX)
2698 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2699 ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2700 ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2702 ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
2706 bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap,
2707 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2708 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2709 prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2710 fwdma_sync(&ocb->sdev->dma, BUS_DMASYNC_PREWRITE);
/* Doorbell mode: ring the doorbell if a previous OCB exists, otherwise
 * write the ORB pointer; non-doorbell mode writes the pointer only when
 * there is no linked predecessor (or the link chain is dead). */
2713 if (ocb->sdev->last_ocb != NULL)
2714 sbp_doorbell(ocb->sdev);
2716 sbp_orb_pointer(ocb->sdev, ocb);
2719 if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2720 ocb->sdev->flags &= ~ORB_LINK_DEAD;
2721 sbp_orb_pointer(ocb->sdev, ocb);
/* sbp_poll: CAM polling entry — delegate to the firewire_comm poll hook. */
2727 sbp_poll(struct cam_sim *sim)
2729 struct sbp_softc *sbp;
2730 struct firewire_comm *fc;
2732 sbp = (struct sbp_softc *)sim->softc;
2735 fc->poll(fc, 0, -1);
/*
 * sbp_dequeue_ocb: find and remove the OCB matching a received status block
 * from the device's outstanding queue. Cancels its timeout, unloads the
 * data DMA map, and handles ORB-chain bookkeeping: in doorbell mode the
 * completed OCB is retained as last_ocb (freed on the next completion); in
 * pointer mode a SRC_NO_NEXT status may require re-pointing the fetch agent
 * at the next queued ORB. Returns the matched OCB or NULL.
 */
2740 static struct sbp_ocb *
2741 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2743 struct sbp_ocb *ocb;
2744 struct sbp_ocb *next;
2745 int s = splfw(), order = 0;
2749 device_printf(sdev->target->sbp->fd.dev,
2750 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2751 "%s:%s 0x%08lx src %d\n",
2753 "%s:%s 0x%08x src %d\n",
2755 __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo), sbp_status->src);
2757 SBP_LOCK(sdev->target->sbp);
2758 for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
2759 next = STAILQ_NEXT(ocb, ocb);
2761 if (OCB_MATCH(ocb, sbp_status)) {
2763 STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2764 if (ocb->ccb != NULL)
2765 untimeout(sbp_timeout, (caddr_t)ocb,
2766 ocb->ccb->ccb_h.timeout_ch);
/* orb[4] low 16 bits nonzero => a data buffer was mapped. */
2767 if (ntohl(ocb->orb[4]) & 0xffff) {
2768 bus_dmamap_sync(sdev->target->sbp->dmat,
2770 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2771 BUS_DMASYNC_POSTREAD :
2772 BUS_DMASYNC_POSTWRITE);
2773 bus_dmamap_unload(sdev->target->sbp->dmat,
2776 if (!use_doorbell) {
2777 if (sbp_status->src == SRC_NO_NEXT) {
2779 sbp_orb_pointer(sdev, next);
2780 else if (order > 0) {
2782 * Unordered execution
2783 * We need to send pointer for
2786 sdev->flags |= ORB_LINK_DEAD;
2791 * XXX this is not correct for unordered
2794 if (sdev->last_ocb != NULL) {
/* Drop the lock around sbp_free_ocb(); it takes it itself. */
2795 SBP_UNLOCK(sdev->target->sbp);
2796 sbp_free_ocb(sdev, sdev->last_ocb);
2797 SBP_LOCK(sdev->target->sbp);
2799 sdev->last_ocb = ocb;
2801 sbp_status->src == SRC_NO_NEXT)
2808 SBP_UNLOCK(sdev->target->sbp);
2811 if (ocb && order > 0) {
2812 device_printf(sdev->target->sbp->fd.dev,
2813 "%s:%s unordered execution order:%d\n",
2814 __func__, sdev->bustgtlun, order);
/*
 * sbp_enqueue_ocb: append an OCB to the device's outstanding queue
 * (softc lock must be held), arm the per-command timeout, and link the new
 * ORB into the hardware-visible chain by patching the predecessor's
 * next-ORB pointer. Returns the previous queue tail (NULL if the queue was
 * empty), which callers use to decide whether to write the ORB pointer.
 */
2820 static struct sbp_ocb *
2821 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2824 struct sbp_ocb *prev, *prev2;
2826 mtx_assert(&sdev->target->sbp->mtx, MA_OWNED);
2828 device_printf(sdev->target->sbp->fd.dev,
2829 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2830 "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, ocb->bus_addr);
2832 "%s:%s 0x%08jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2835 prev2 = prev = STAILQ_LAST(&sdev->ocbs, sbp_ocb, ocb);
2836 STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2838 if (ocb->ccb != NULL)
2839 ocb->ccb->ccb_h.timeout_ch = timeout(sbp_timeout, (caddr_t)ocb,
2840 (ocb->ccb->ccb_h.timeout * hz) / 1000);
/* In doorbell mode an empty queue still links off the retained last_ocb. */
2842 if (use_doorbell && prev == NULL)
2843 prev2 = sdev->last_ocb;
2845 if (prev2 != NULL && (ocb->sdev->flags & ORB_LINK_DEAD) == 0) {
2847 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2848 printf("linking chain 0x%x -> 0x%x\n",
2849 prev2->bus_addr, ocb->bus_addr);
2851 printf("linking chain 0x%jx -> 0x%jx\n",
2852 (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2856 * Suppress compiler optimization so that orb[1] must be written first.
2857 * XXX We may need an explicit memory barrier for other architectures
2858 * other than i386/amd64.
2860 *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2861 *(volatile uint32_t *)&prev2->orb[0] = 0;
/*
 * sbp_get_ocb: pop a free OCB from the device's free list (softc lock
 * held). On exhaustion sets ORB_SHORTAGE so sbp_free_ocb() knows to thaw
 * the devq later, and returns NULL.
 */
2868 static struct sbp_ocb *
2869 sbp_get_ocb(struct sbp_dev *sdev)
2871 struct sbp_ocb *ocb;
2874 mtx_assert(&sdev->target->sbp->mtx, MA_OWNED);
2875 ocb = STAILQ_FIRST(&sdev->free_ocbs);
2877 sdev->flags |= ORB_SHORTAGE;
2878 printf("ocb shortage!!!\n");
2882 STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
/*
 * sbp_free_ocb: return an OCB to the free list; if CAM was frozen due to
 * OCB shortage, release the accumulated devq freeze count.
 */
2889 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2894 SBP_LOCK(sdev->target->sbp);
2895 STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2896 if ((sdev->flags & ORB_SHORTAGE) != 0) {
2899 sdev->flags &= ~ORB_SHORTAGE;
2900 count = sdev->freeze;
2902 xpt_release_devq(sdev->path, count, TRUE);
2904 SBP_UNLOCK(sdev->target->sbp);
/*
 * sbp_abort_ocb --
 *	Abort a single outstanding ORB: sync/unload its DMA map if data
 *	was mapped, cancel the command timeout, complete the attached CAM
 *	CCB with the supplied status, and return the ORB to the free list.
 * NOTE(review): non-contiguous extraction — the "sdev = ocb->sdev;"
 *	assignment, the xpt_done() call (presumably between the SBP_LOCK/
 *	SBP_UNLOCK pair at 2935/2937 — TODO confirm), #else/#endif lines
 *	and closing braces are missing from this view.
 */
2908 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2910 struct sbp_dev *sdev;
/* Debug trace of the aborted ORB's bus address. */
2914 device_printf(sdev->target->sbp->fd.dev,
2915 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2916 "%s:%s 0x%x\n", __func__, sdev->bustgtlun, ocb->bus_addr);
2918 "%s:%s 0x%jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2922 if (ocb->ccb != NULL)
2923 sbp_print_scsi_cmd(ocb);
/*
 * orb[4] low 16 bits hold the data length; nonzero means a data buffer
 * was DMA-mapped and must be synced (direction from ORB_CMD_IN) and
 * unloaded before the ORB is recycled.
 */
2925 if (ntohl(ocb->orb[4]) & 0xffff) {
2926 bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap,
2927 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2928 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2929 bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap);
/* Cancel the pending timeout and complete the CCB with the abort status. */
2931 if (ocb->ccb != NULL) {
2932 untimeout(sbp_timeout, (caddr_t)ocb,
2933 ocb->ccb->ccb_h.timeout_ch);
2934 ocb->ccb->ccb_h.status = status;
2935 SBP_LOCK(sdev->target->sbp);
2937 SBP_UNLOCK(sdev->target->sbp);
/* Recycle the ORB onto the free list (takes the lock internally). */
2939 sbp_free_ocb(sdev, ocb);
/*
 * sbp_abort_all_ocbs --
 *	Abort every outstanding ORB on a device.  The pending list is moved
 *	wholesale onto a local queue under the lock (so sbp_abort_ocb can
 *	run unlocked per-entry), then each ORB is aborted with the given
 *	status.  Any cached last_ocb (doorbell-chain anchor) is freed too.
 * NOTE(review): non-contiguous extraction — the STAILQ_INIT(&temp) that
 *	must precede the CONCAT, plus loop/function braces, are missing
 *	from this view.
 */
2943 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2946 struct sbp_ocb *ocb, *next;
2947 STAILQ_HEAD(, sbp_ocb) temp;
/* Detach the whole pending list atomically. */
2952 SBP_LOCK(sdev->target->sbp);
2953 STAILQ_CONCAT(&temp, &sdev->ocbs);
2954 STAILQ_INIT(&sdev->ocbs);
2955 SBP_UNLOCK(sdev->target->sbp);
/* Safe traversal: fetch next before sbp_abort_ocb recycles the entry. */
2957 for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
2958 next = STAILQ_NEXT(ocb, ocb);
2959 sbp_abort_ocb(ocb, status);
/* Drop the retained last-completed ORB used for doorbell chaining. */
2961 if (sdev->last_ocb != NULL) {
2962 sbp_free_ocb(sdev, sdev->last_ocb);
2963 sdev->last_ocb = NULL;
/*
 * Newbus glue: device method table, driver declaration, and module
 * registration for the sbp driver, attached under the firewire bus.
 * NOTE(review): non-contiguous extraction — the method-table terminator
 * (e.g. DEVMETHOD_END / { 0, 0 }), the driver name and methods fields of
 * sbp_driver, and the #else/#endif around the DragonFly branch are
 * missing from this view.
 */
2969 static devclass_t sbp_devclass;
2971 static device_method_t sbp_methods[] = {
2972 /* device interface */
2973 DEVMETHOD(device_identify, sbp_identify),
2974 DEVMETHOD(device_probe, sbp_probe),
2975 DEVMETHOD(device_attach, sbp_attach),
2976 DEVMETHOD(device_detach, sbp_detach),
2977 DEVMETHOD(device_shutdown, sbp_shutdown),
2982 static driver_t sbp_driver = {
2985 sizeof(struct sbp_softc),
2987 #ifdef __DragonFly__
2988 DECLARE_DUMMY_MODULE(sbp);
/* Register under the firewire bus; depends on firewire and cam modules. */
2990 DRIVER_MODULE(sbp, firewire, sbp_driver, sbp_devclass, 0, 0);
2991 MODULE_VERSION(sbp, 1);
2992 MODULE_DEPEND(sbp, firewire, 1, 1, 1);
2993 MODULE_DEPEND(sbp, cam, 1, 1, 1);