2 * Copyright (c) 2003 Hidetoshi Shimokawa
3 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the acknowledgement as below:
17 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
26 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/module.h>
42 #include <sys/kernel.h>
43 #include <sys/sysctl.h>
44 #include <machine/bus.h>
45 #include <sys/malloc.h>
46 #if defined(__FreeBSD__) && __FreeBSD_version >= 501102
48 #include <sys/mutex.h>
51 #if defined(__DragonFly__) || __FreeBSD_version < 500106
52 #include <sys/devicestat.h> /* for struct devstat */
56 #include <bus/cam/cam.h>
57 #include <bus/cam/cam_ccb.h>
58 #include <bus/cam/cam_sim.h>
59 #include <bus/cam/cam_xpt_sim.h>
60 #include <bus/cam/cam_debug.h>
61 #include <bus/cam/cam_periph.h>
62 #include <bus/cam/scsi/scsi_all.h>
64 #include <bus/firewire/firewire.h>
65 #include <bus/firewire/firewirereg.h>
66 #include <bus/firewire/fwdma.h>
67 #include <bus/firewire/iec13213.h>
71 #include <cam/cam_ccb.h>
72 #include <cam/cam_sim.h>
73 #include <cam/cam_xpt_sim.h>
74 #include <cam/cam_debug.h>
75 #include <cam/cam_periph.h>
76 #include <cam/scsi/scsi_all.h>
78 #include <dev/firewire/firewire.h>
79 #include <dev/firewire/firewirereg.h>
80 #include <dev/firewire/fwdma.h>
81 #include <dev/firewire/iec13213.h>
82 #include <dev/firewire/sbp.h>
/* CAM CCB private-pointer aliases and driver-wide sizing constants. */
85 #define ccb_sdev_ptr spriv_ptr0
86 #define ccb_sbp_ptr spriv_ptr1
88 #define SBP_NUM_TARGETS 8 /* MAX 64 */
90 * Scan_bus doesn't work for more than 8 LUNs
91 * because of CAM_SCSI2_MAXLUN in cam_xpt.c
93 #define SBP_NUM_LUNS 64
94 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */)
95 #define SBP_DMA_SIZE PAGE_SIZE
/* Login response and per-LUN OCB pool share one SBP_DMA_SIZE allocation. */
96 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res)
97 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
98 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS)
101 * STATUS FIFO addressing
103 * -----------------------
104 * 0- 1( 2): 0 (alignment)
107 * 16-31( 8): reserved
108 * 32-47(16): SBP_BIND_HI
109 * 48-64(16): bus_id, node_id
111 #define SBP_BIND_HI 0x1
/*
 * Encode a (target, lun) pair into a status-FIFO 1394 address;
 * SBP_ADDR2TRG/SBP_ADDR2LUN are the matching decoders.
 */
112 #define SBP_DEV2ADDR(t, l) \
113 (((u_int64_t)SBP_BIND_HI << 32) \
114 | (((l) & 0xff) << 8) \
115 | (((t) & 0x3f) << 2))
116 #define SBP_ADDR2TRG(a) (((a) >> 2) & 0x3f)
117 #define SBP_ADDR2LUN(a) (((a) >> 8) & 0xff)
118 #define SBP_INITIATOR 7
/*
 * Run-time tunables; each is exposed both as a sysctl under
 * hw.firewire.sbp and as a boot-time loader tunable of the same name.
 */
120 static char *orb_fun_name[] = {
124 static int debug = 0;
125 static int auto_login = 1;
126 static int max_speed = -1;
127 static int sbp_cold = 1;
128 static int ex_login = 1;
129 static int login_delay = 1000; /* msec */
130 static int scan_delay = 500; /* msec */
131 static int use_doorbell = 0;
132 static int sbp_tags = 0;
134 SYSCTL_DECL(_hw_firewire);
135 static SYSCTL_NODE(_hw_firewire, OID_AUTO, sbp, CTLFLAG_RD, 0,
137 SYSCTL_INT(_debug, OID_AUTO, sbp_debug, CTLFLAG_RW, &debug, 0,
139 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, auto_login, CTLFLAG_RW, &auto_login, 0,
140 "SBP perform login automatically");
141 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, max_speed, CTLFLAG_RW, &max_speed, 0,
142 "SBP transfer max speed");
143 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, exclusive_login, CTLFLAG_RW,
144 &ex_login, 0, "SBP enable exclusive login");
145 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, login_delay, CTLFLAG_RW,
146 &login_delay, 0, "SBP login delay in msec");
147 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, scan_delay, CTLFLAG_RW,
148 &scan_delay, 0, "SBP scan delay in msec");
149 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, use_doorbell, CTLFLAG_RW,
150 &use_doorbell, 0, "SBP use doorbell request");
151 SYSCTL_INT(_hw_firewire_sbp, OID_AUTO, tags, CTLFLAG_RW, &sbp_tags, 0,
152 "SBP tagged queuing support");
154 TUNABLE_INT("hw.firewire.sbp.auto_login", &auto_login);
155 TUNABLE_INT("hw.firewire.sbp.max_speed", &max_speed);
156 TUNABLE_INT("hw.firewire.sbp.exclusive_login", &ex_login);
157 TUNABLE_INT("hw.firewire.sbp.login_delay", &login_delay);
158 TUNABLE_INT("hw.firewire.sbp.scan_delay", &scan_delay);
159 TUNABLE_INT("hw.firewire.sbp.use_doorbell", &use_doorbell);
160 TUNABLE_INT("hw.firewire.sbp.tags", &sbp_tags);
162 #define NEED_RESPONSE 0
/* Largest DMA segment; capped by the 16-bit length field, page-aligned. */
164 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)
165 #ifdef __sparc64__ /* iommu */
166 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX)
168 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE)
171 STAILQ_ENTRY(sbp_ocb) ocb;
175 #define IND_PTR_OFFSET (8*sizeof(uint32_t))
176 struct ind_ptr ind_ptr[SBP_IND_MAX];
177 struct sbp_dev *sdev;
178 int flags; /* XXX should be removed */
180 struct callout timer;
/* OCB activity types. */
183 #define OCB_ACT_MGM 0
184 #define OCB_ACT_CMD 1
/* Match an OCB against the (big-endian) ORB address in a status block. */
185 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo))
/* Per-LUN login state machine. */
188 #define SBP_DEV_RESET 0 /* accept login */
189 #define SBP_DEV_LOGIN 1 /* to login */
191 #define SBP_DEV_RECONN 2 /* to reconnect */
193 #define SBP_DEV_TOATTACH 3 /* to attach */
194 #define SBP_DEV_PROBE 4 /* scan lun */
195 #define SBP_DEV_ATTACHED 5 /* in operation */
196 #define SBP_DEV_DEAD 6 /* unavailable unit */
197 #define SBP_DEV_RETRY 7 /* unavailable unit */
/* sdev->flags bits. */
203 #define ORB_LINK_DEAD (1 << 0)
204 #define VALID_LUN (1 << 1)
205 #define ORB_POINTER_ACTIVE (1 << 2)
206 #define ORB_POINTER_NEED (1 << 3)
207 #define ORB_DOORBELL_ACTIVE (1 << 4)
208 #define ORB_DOORBELL_NEED (1 << 5)
209 #define ORB_SHORTAGE (1 << 6)
211 struct cam_path *path;
212 struct sbp_target *target;
213 struct fwdma_alloc dma;
214 struct sbp_login_res *login;
215 struct callout login_callout;
217 STAILQ_HEAD(, sbp_ocb) ocbs;
218 STAILQ_HEAD(, sbp_ocb) free_ocbs;
219 struct sbp_ocb *last_ocb;
229 struct sbp_dev **luns;
230 struct sbp_softc *sbp;
231 struct fw_device *fwdev;
232 uint32_t mgm_hi, mgm_lo;
233 struct sbp_ocb *mgm_ocb_cur;
234 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue;
235 struct callout mgm_ocb_timeout;
236 struct callout scan_callout;
237 STAILQ_HEAD(, fw_xfer) xferlist;
242 struct firewire_dev_comm fd;
244 struct cam_path *path;
245 struct sbp_target targets[SBP_NUM_TARGETS];
248 struct timeval last_busreset;
249 #define SIMQ_FREEZED 1
/* Softc mutex wrappers. */
253 #define SBP_LOCK(sbp) mtx_lock(&(sbp)->mtx)
254 #define SBP_UNLOCK(sbp) mtx_unlock(&(sbp)->mtx)
255 #define SBP_LOCK_ASSERT(sbp) mtx_assert(&(sbp)->mtx, MA_OWNED)
/* Forward declarations for the driver's internal helpers. */
257 static void sbp_post_explore (void *);
258 static void sbp_recv (struct fw_xfer *);
259 static void sbp_mgm_callback (struct fw_xfer *);
261 static void sbp_cmd_callback (struct fw_xfer *);
263 static void sbp_orb_pointer (struct sbp_dev *, struct sbp_ocb *);
264 static void sbp_doorbell(struct sbp_dev *);
265 static void sbp_execute_ocb (void *, bus_dma_segment_t *, int, int);
266 static void sbp_free_ocb (struct sbp_dev *, struct sbp_ocb *);
267 static void sbp_abort_ocb (struct sbp_ocb *, int);
268 static void sbp_abort_all_ocbs (struct sbp_dev *, int);
269 static struct fw_xfer * sbp_write_cmd (struct sbp_dev *, int, int);
270 static struct sbp_ocb * sbp_get_ocb (struct sbp_dev *);
271 static struct sbp_ocb * sbp_enqueue_ocb (struct sbp_dev *, struct sbp_ocb *);
272 static struct sbp_ocb * sbp_dequeue_ocb (struct sbp_dev *, struct sbp_status *);
273 static void sbp_cam_detach_sdev(struct sbp_dev *);
274 static void sbp_free_sdev(struct sbp_dev *);
275 static void sbp_cam_detach_target (struct sbp_target *);
276 static void sbp_free_target (struct sbp_target *);
277 static void sbp_mgm_timeout (void *arg);
278 static void sbp_timeout (void *arg);
279 static void sbp_mgm_orb (struct sbp_dev *, int, struct sbp_ocb *);
281 static MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/FireWire");
283 /* cam related functions */
284 static void sbp_action(struct cam_sim *sim, union ccb *ccb);
285 static void sbp_poll(struct cam_sim *sim);
286 static void sbp_cam_scan_lun(struct cam_periph *, union ccb *);
287 static void sbp_cam_scan_target(void *arg);
/* Decode strings for the SBP-2 status block "status" field (sbp_status). */
289 static char *orb_status0[] = {
290 /* 0 */ "No additional information to report",
291 /* 1 */ "Request type not supported",
292 /* 2 */ "Speed not supported",
293 /* 3 */ "Page size not supported",
294 /* 4 */ "Access denied",
295 /* 5 */ "Logical unit not supported",
296 /* 6 */ "Maximum payload too small",
297 /* 7 */ "Reserved for future standardization",
298 /* 8 */ "Resources unavailable",
299 /* 9 */ "Function rejected",
300 /* A */ "Login ID not recognized",
301 /* B */ "Dummy ORB completed",
302 /* C */ "Request aborted",
303 /* FF */ "Unspecified error"
304 #define MAX_ORB_STATUS0 0xd
/* Decode strings for the "object" subfield of status byte 1. */
307 static char *orb_status1_object[] = {
308 /* 0 */ "Operation request block (ORB)",
309 /* 1 */ "Data buffer",
310 /* 2 */ "Page table",
311 /* 3 */ "Unable to specify"
/* Decode strings for the "serial bus error" subfield of status byte 1. */
314 static char *orb_status1_serial_bus_error[] = {
315 /* 0 */ "Missing acknowledge",
316 /* 1 */ "Reserved; not to be used",
317 /* 2 */ "Time-out error",
318 /* 3 */ "Reserved; not to be used",
319 /* 4 */ "Busy retry limit exceeded(X)",
320 /* 5 */ "Busy retry limit exceeded(A)",
321 /* 6 */ "Busy retry limit exceeded(B)",
322 /* 7 */ "Reserved for future standardization",
323 /* 8 */ "Reserved for future standardization",
324 /* 9 */ "Reserved for future standardization",
325 /* A */ "Reserved for future standardization",
326 /* B */ "Tardy retry limit exceeded",
327 /* C */ "Conflict error",
328 /* D */ "Data error",
329 /* E */ "Type error",
330 /* F */ "Address error"
/*
 * Bus identify hook: create a single "sbp" child under the FireWire
 * parent if one does not already exist.
 */
334 sbp_identify(driver_t *driver, device_t parent)
337 printf("sbp_identify\n");
340 if (device_find_child(parent, "sbp", -1) == NULL)
341 BUS_ADD_CHILD(parent, 0, "sbp", -1);
/* Probe hook: set the human-readable description for this device. */
348 sbp_probe(device_t dev)
352 printf("sbp_probe\n");
355 device_set_desc(dev, "SBP-2/SCSI over FireWire");
366 * Display device characteristics on the console
369 sbp_show_sdev_info(struct sbp_dev *sdev)
371 struct fw_device *fwdev;
373 fwdev = sdev->target->fwdev;
374 device_printf(sdev->target->sbp->fd.dev,
375 "%s: %s: ordered:%d type:%d EUI:%08x%08x node:%d "
376 "speed:%d maxrec:%d\n",
/* bit 6 of the LUN type field: ordered-execution flag */
379 (sdev->type & 0x40) >> 6,
387 device_printf(sdev->target->sbp->fd.dev,
388 "%s: %s '%s' '%s' '%s'\n",
/* Wired-down target assignments: {bus unit, target id, device EUI-64}. */
401 /* Bus Target EUI64 */
403 {0, 2, {0x00018ea0, 0x01fd0154}}, /* Logitec HDD */
404 {0, 0, {0x00018ea6, 0x00100682}}, /* Logitec DVD */
405 {0, 1, {0x00d03200, 0xa412006a}}, /* Yano HDD */
/*
 * Choose a target slot for a newly seen fw_device: honor wired-down
 * assignments from the wired[] table first, then fall back to the first
 * free slot that is not reserved by a wired entry.
 */
411 sbp_new_target(struct sbp_softc *sbp, struct fw_device *fwdev)
413 int bus, i, target=-1;
/* w[i] != 0 marks slots reserved by wired-down entries on this bus. */
414 char w[SBP_NUM_TARGETS];
417 bus = device_get_unit(sbp->fd.dev);
419 /* XXX wired-down configuration should be gotten from
420 tunable or device hint */
421 for (i = 0; wired[i].bus >= 0; i ++) {
422 if (wired[i].bus == bus) {
423 w[wired[i].target] = 1;
424 if (wired[i].eui.hi == fwdev->eui.hi &&
425 wired[i].eui.lo == fwdev->eui.lo)
426 target = wired[i].target;
/* A wired slot is only usable if no other fwdev already claimed it. */
430 if(target < SBP_NUM_TARGETS &&
431 sbp->targets[target].fwdev == NULL)
433 device_printf(sbp->fd.dev,
434 "target %d is not free for %08x:%08x\n",
435 target, fwdev->eui.hi, fwdev->eui.lo);
438 /* non-wired target */
439 for (i = 0; i < SBP_NUM_TARGETS; i ++)
440 if (sbp->targets[i].fwdev == NULL && w[i] == 0) {
/*
 * (Re)build the LUN table of a target from its configuration ROM:
 * count the LUN entries, resize target->luns, allocate per-LUN sbp_dev
 * structures with their DMA area (login response + OCB pool), and
 * detach any previously known LUN that is no longer advertised.
 * Called with the softc mutex held.
 */
449 sbp_alloc_lun(struct sbp_target *target)
451 struct crom_context cc;
453 struct sbp_dev *sdev, **newluns;
454 struct sbp_softc *sbp;
458 SBP_LOCK_ASSERT(sbp);
459 crom_init_context(&cc, target->fwdev->csrrom);
460 /* XXX should parse appropriate unit directories only */
/* First pass: find the highest LUN number advertised in the ROM. */
462 while (cc.depth >= 0) {
463 reg = crom_search_key(&cc, CROM_LUN);
466 lun = reg->val & 0xffff;
468 printf("target %d lun %d found\n", target->target_id, lun);
475 device_printf(target->sbp->fd.dev, "%d no LUN found\n",
479 if (maxlun >= SBP_NUM_LUNS)
480 maxlun = SBP_NUM_LUNS;
482 /* Invalidate stale devices */
483 for (lun = 0; lun < target->num_lun; lun ++) {
484 sdev = target->luns[lun];
487 sdev->flags &= ~VALID_LUN;
490 sbp_cam_detach_sdev(sdev);
492 target->luns[lun] = NULL;
/* Grow or shrink the LUN pointer array to the new maximum. */
497 if (maxlun != target->num_lun) {
498 newluns = (struct sbp_dev **) realloc(target->luns,
499 sizeof(struct sbp_dev *) * maxlun,
500 M_SBP, M_NOWAIT | M_ZERO);
502 if (newluns == NULL) {
503 printf("%s: realloc failed\n", __func__);
504 newluns = target->luns;
505 maxlun = target->num_lun;
509 * We must zero the extended region for the case
510 * realloc() doesn't allocate new buffer.
512 if (maxlun > target->num_lun)
513 bzero(&newluns[target->num_lun],
514 sizeof(struct sbp_dev *) *
515 (maxlun - target->num_lun));
517 target->luns = newluns;
518 target->num_lun = maxlun;
/* Second pass: instantiate an sbp_dev for each advertised LUN. */
521 crom_init_context(&cc, target->fwdev->csrrom);
522 while (cc.depth >= 0) {
525 reg = crom_search_key(&cc, CROM_LUN);
528 lun = reg->val & 0xffff;
529 if (lun >= SBP_NUM_LUNS) {
530 printf("too large lun %d\n", lun);
534 sdev = target->luns[lun];
536 sdev = malloc(sizeof(struct sbp_dev),
537 M_SBP, M_NOWAIT | M_ZERO);
539 printf("%s: malloc failed\n", __func__);
542 target->luns[lun] = sdev;
544 sdev->target = target;
545 STAILQ_INIT(&sdev->ocbs);
546 callout_init_mtx(&sdev->login_callout, &sbp->mtx, 0);
547 sdev->status = SBP_DEV_RESET;
549 snprintf(sdev->bustgtlun, 32, "%s:%d:%d",
550 device_get_nameunit(sdev->target->sbp->fd.dev),
551 sdev->target->target_id,
554 sdev->flags |= VALID_LUN;
/* LUN type field lives in bits 16-23 of the ROM entry. */
555 sdev->type = (reg->val & 0xff0000) >> 16;
/* One DMA area per LUN: login response followed by the OCB pool. */
560 fwdma_malloc(sbp->fd.fc,
561 /* alignment */ sizeof(uint32_t),
562 SBP_DMA_SIZE, &sdev->dma, BUS_DMA_NOWAIT |
564 if (sdev->dma.v_addr == NULL) {
565 printf("%s: dma space allocation failed\n",
568 target->luns[lun] = NULL;
571 sdev->login = (struct sbp_login_res *) sdev->dma.v_addr;
572 sdev->ocb = (struct sbp_ocb *)
573 ((char *)sdev->dma.v_addr + SBP_LOGIN_SIZE);
574 bzero((char *)sdev->ocb,
575 sizeof (struct sbp_ocb) * SBP_QUEUE_LEN);
577 STAILQ_INIT(&sdev->free_ocbs);
578 for (i = 0; i < SBP_QUEUE_LEN; i++) {
/* Bus address of the ORB inside this OCB, for the fetch agent. */
581 ocb->bus_addr = sdev->dma.bus_addr
583 + sizeof(struct sbp_ocb) * i
584 + offsetof(struct sbp_ocb, orb[0]);
585 if (bus_dmamap_create(sbp->dmat, 0, &ocb->dmamap)) {
586 printf("sbp_attach: cannot create dmamap\n");
590 callout_init_mtx(&ocb->timer, &sbp->mtx, 0);
591 sbp_free_ocb(sdev, ocb);
/* Finally, detach LUNs that were not re-validated above. */
597 for (lun = 0; lun < target->num_lun; lun ++) {
598 sdev = target->luns[lun];
599 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
600 sbp_cam_detach_sdev(sdev);
602 target->luns[lun] = NULL;
/*
 * Bind a fw_device to a free target slot and record the management
 * agent's CSR offset found in the device's configuration ROM.
 * Returns NULL-equivalent failure paths when no slot or no management
 * address is available (interior lines not visible in this view).
 */
607 static struct sbp_target *
608 sbp_alloc_target(struct sbp_softc *sbp, struct fw_device *fwdev)
611 struct sbp_target *target;
612 struct crom_context cc;
616 printf("sbp_alloc_target\n");
618 i = sbp_new_target(sbp, fwdev);
620 device_printf(sbp->fd.dev, "increase SBP_NUM_TARGETS!\n");
624 target = &sbp->targets[i];
626 target->fwdev = fwdev;
627 target->target_id = i;
628 /* XXX we may want to reload mgm port after each bus reset */
629 /* XXX there might be multiple management agents */
630 crom_init_context(&cc, target->fwdev->csrrom);
631 reg = crom_search_key(&cc, CROM_MGM);
632 if (reg == NULL || reg->val == 0) {
633 printf("NULL management address\n");
634 target->fwdev = NULL;
/* Management agent lives in CSR register space (0xfffff0000000 base). */
637 target->mgm_hi = 0xffff;
638 target->mgm_lo = 0xf0000000 | (reg->val << 2);
639 target->mgm_ocb_cur = NULL;
641 printf("target:%d mgm_port: %x\n", i, target->mgm_lo);
643 STAILQ_INIT(&target->xferlist);
645 STAILQ_INIT(&target->mgm_ocb_queue);
646 callout_init_mtx(&target->mgm_ocb_timeout, &sbp->mtx, 0);
647 callout_init_mtx(&target->scan_callout, &sbp->mtx, 0);
/*
 * Fill in sdev->vendor / sdev->product / sdev->revision strings from
 * the parent fw_device's configuration ROM.
 */
655 sbp_probe_lun(struct sbp_dev *sdev)
657 struct fw_device *fwdev;
658 struct crom_context c, *cc = &c;
661 bzero(sdev->vendor, sizeof(sdev->vendor));
662 bzero(sdev->product, sizeof(sdev->product));
664 fwdev = sdev->target->fwdev;
665 crom_init_context(cc, fwdev->csrrom);
666 /* get vendor string */
667 crom_search_key(cc, CSRKEY_VENDOR);
669 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor));
670 /* skip to the unit directory for SBP-2 */
671 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) {
672 if (reg->val == CSRVAL_T10SBP2)
676 /* get firmware revision */
677 reg = crom_search_key(cc, CSRKEY_FIRM_VER);
679 snprintf(sdev->revision, sizeof(sdev->revision),
681 /* get product string */
682 crom_search_key(cc, CSRKEY_MODEL);
684 crom_parse_text(cc, sdev->product, sizeof(sdev->product));
/*
 * Deferred-login callout handler: issue a LOGIN management ORB.
 * Runs from sdev->login_callout with the softc mutex held
 * (callout was initialized with callout_init_mtx on &sbp->mtx).
 */
688 sbp_login_callout(void *arg)
690 struct sbp_dev *sdev = (struct sbp_dev *)arg;
691 SBP_LOCK_ASSERT(sdev->target->sbp);
692 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL);
/*
 * Schedule a login for the LUN, delaying so that at least login_delay
 * msec have elapsed since the last bus reset (some devices need the
 * settle time before accepting a login).
 */
696 sbp_login(struct sbp_dev *sdev)
698 struct timeval delta;
703 timevalsub(&delta, &sdev->target->sbp->last_busreset);
704 t.tv_sec = login_delay / 1000;
705 t.tv_usec = (login_delay % 1000) * 1000;
/* t := remaining delay = login_delay - (now - last_busreset) */
706 timevalsub(&t, &delta);
707 if (t.tv_sec >= 0 && t.tv_usec > 0)
708 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000;
710 printf("%s: sec = %jd usec = %ld ticks = %d\n", __func__,
711 (intmax_t)t.tv_sec, t.tv_usec, ticks);
713 callout_reset(&sdev->login_callout, ticks,
714 sbp_login_callout, (void *)(sdev));
/* A device is usable when attached and its ROM advertises ANSI T10 SBP-2. */
717 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \
718 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2))
/*
 * Re-evaluate every LUN of a target (typically after bus exploration):
 * rebuild the LUN table, then reconnect or re-login live LUNs and tear
 * down those whose device has disappeared. Softc mutex held.
 */
721 sbp_probe_target(void *arg)
723 struct sbp_target *target = (struct sbp_target *)arg;
724 struct sbp_softc *sbp = target->sbp;
725 struct sbp_dev *sdev;
728 alive = SBP_FWDEV_ALIVE(target->fwdev);
730 device_printf(sbp->fd.dev, "%s %d%salive\n",
731 __func__, target->target_id,
732 (!alive) ? " not " : "");
736 SBP_LOCK_ASSERT(sbp);
737 sbp_alloc_lun(target);
739 /* XXX untimeout mgm_ocb and dequeue */
740 for (i=0; i < target->num_lun; i++) {
741 sdev = target->luns[i];
744 if (alive && (sdev->status != SBP_DEV_DEAD)) {
/* Hold off new CCBs while the LUN is re-established. */
745 if (sdev->path != NULL) {
746 xpt_freeze_devq(sdev->path, 1);
750 sbp_show_sdev_info(sdev);
752 sbp_abort_all_ocbs(sdev, CAM_SCSI_BUS_RESET);
753 switch (sdev->status) {
755 /* new or revived target */
759 case SBP_DEV_TOATTACH:
761 case SBP_DEV_ATTACHED:
/* Previously attached LUN: try RECONNECT instead of a fresh login. */
764 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL);
768 switch (sdev->status) {
769 case SBP_DEV_ATTACHED:
771 /* the device has gone */
772 device_printf(sbp->fd.dev, "%s: lost target\n",
776 xpt_freeze_devq(sdev->path, 1);
779 sdev->status = SBP_DEV_RETRY;
780 sbp_cam_detach_sdev(sdev);
782 target->luns[i] = NULL;
785 case SBP_DEV_TOATTACH:
786 sdev->status = SBP_DEV_RESET;
/*
 * Bus-reset hook: freeze the SIM queue (only once per reset cycle,
 * tracked by SIMQ_FREEZED) and record the reset time for the
 * login_delay computation in sbp_login().
 */
798 sbp_post_busreset(void *arg)
800 struct sbp_softc *sbp;
802 sbp = (struct sbp_softc *)arg;
804 printf("sbp_post_busreset\n");
807 if ((sbp->sim->flags & SIMQ_FREEZED) == 0) {
808 xpt_freeze_simq(sbp->sim, /*count*/1);
809 sbp->sim->flags |= SIMQ_FREEZED;
811 microtime(&sbp->last_busreset);
/*
 * Post-exploration hook: garbage-collect target slots whose fw_device
 * vanished from the bus, bind newly discovered SBP-2 devices to target
 * slots and probe them, then release the SIM queue frozen by
 * sbp_post_busreset().
 */
816 sbp_post_explore(void *arg)
818 struct sbp_softc *sbp = (struct sbp_softc *)arg;
819 struct sbp_target *target;
820 struct fw_device *fwdev;
824 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold);
826 /* We need physical access */
827 if (!firewire_phydma_enable)
836 * XXX don't let CAM the bus rest.
837 * CAM tries to do something with frozen (DEV_RETRY) devices.
839 xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL);
842 /* Garbage Collection */
843 for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
844 target = &sbp->targets[i];
845 STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link)
846 if (target->fwdev == NULL || target->fwdev == fwdev)
849 /* device has removed in lower driver */
850 sbp_cam_detach_target(target);
851 sbp_free_target(target);
854 /* traverse device list */
855 STAILQ_FOREACH(fwdev, &sbp->fd.fc->devices, link) {
857 device_printf(sbp->fd.dev,"%s:: EUI:%08x%08x %s attached, state=%d\n",
858 __func__, fwdev->eui.hi, fwdev->eui.lo,
859 (fwdev->status != FWDEVATTACHED) ? "not" : "",
862 alive = SBP_FWDEV_ALIVE(fwdev);
/* Is this fwdev already bound to one of our target slots? */
863 for(i = 0 ; i < SBP_NUM_TARGETS ; i ++){
864 target = &sbp->targets[i];
865 if(target->fwdev == fwdev ) {
870 if(i == SBP_NUM_TARGETS){
873 target = sbp_alloc_target(sbp, fwdev);
880 sbp_probe_target((void *)target);
881 if (target->num_lun == 0)
882 sbp_free_target(target);
884 xpt_release_simq(sbp->sim, /*run queue*/TRUE);
885 sbp->sim->flags &= ~SIMQ_FREEZED;
/*
 * Completion handler for the login-response block write: return the
 * xfer to the softc-wide fwb xfer list under the softc mutex.
 */
891 sbp_loginres_callback(struct fw_xfer *xfer){
892 struct sbp_dev *sdev;
893 sdev = (struct sbp_dev *)xfer->sc;
895 device_printf(sdev->target->sbp->fd.dev,"%s\n", __func__);
898 SBP_LOCK(sdev->target->sbp);
899 STAILQ_INSERT_TAIL(&sdev->target->sbp->fwb.xferlist, xfer, link);
900 SBP_UNLOCK(sdev->target->sbp);
/*
 * Return a finished fw_xfer to the owning target's free list.
 * Caller must hold the softc mutex (asserted below).
 */
906 sbp_xfer_free(struct fw_xfer *xfer)
908 struct sbp_dev *sdev;
910 sdev = (struct sbp_dev *)xfer->sc;
911 fw_xfer_unload(xfer);
912 SBP_LOCK_ASSERT(sdev->target->sbp);
913 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link);
/*
 * Completion of the RESET_START register write: report failure if any,
 * then kick the login state machine for every sibling LUN that is
 * waiting in SBP_DEV_LOGIN state.
 */
917 sbp_reset_start_callback(struct fw_xfer *xfer)
919 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc;
920 struct sbp_target *target = sdev->target;
923 if (xfer->resp != 0) {
924 device_printf(sdev->target->sbp->fd.dev,
925 "%s: %s failed: resp=%d\n", __func__, sdev->bustgtlun, xfer->resp);
928 SBP_LOCK(target->sbp);
929 for (i = 0; i < target->num_lun; i++) {
930 tsdev = target->luns[i];
931 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN)
934 SBP_UNLOCK(target->sbp);
/*
 * Issue a quadlet write to the unit's RESET_START CSR register
 * (0xfffff0000000 | RESET_START) to reset the device's SBP-2 state.
 */
938 sbp_reset_start(struct sbp_dev *sdev)
940 struct fw_xfer *xfer;
944 device_printf(sdev->target->sbp->fd.dev,
945 "%s:%s\n", __func__,sdev->bustgtlun);
948 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
949 xfer->hand = sbp_reset_start_callback;
950 fp = &xfer->send.hdr;
951 fp->mode.wreqq.dest_hi = 0xffff;
952 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START;
953 fp->mode.wreqq.data = htonl(0xf);
954 fw_asyreq(xfer->fc, -1, xfer);
/* Completion handler for management-ORB pointer writes. */
958 sbp_mgm_callback(struct fw_xfer *xfer)
960 struct sbp_dev *sdev;
963 sdev = (struct sbp_dev *)xfer->sc;
966 device_printf(sdev->target->sbp->fd.dev,
967 "%s:%s\n", __func__, sdev->bustgtlun);
970 SBP_LOCK(sdev->target->sbp);
972 SBP_UNLOCK(sdev->target->sbp);
/*
 * Scan the target's LUN table starting at 'lun' for the next device
 * in SBP_DEV_PROBE state (used to chain CAM LUN scans).
 */
975 static struct sbp_dev *
976 sbp_next_dev(struct sbp_target *target, int lun)
978 struct sbp_dev **sdevp;
981 for (i = lun, sdevp = &target->luns[lun]; i < target->num_lun;
983 if (*sdevp != NULL && (*sdevp)->status == SBP_DEV_PROBE)
/*
 * XPT_SCAN_LUN completion: mark the scanned LUN attached (or log the
 * failure), then reuse the same CCB to scan the next LUN still in
 * SBP_DEV_PROBE state. Softc mutex held (asserted).
 */
990 sbp_cam_scan_lun(struct cam_periph *periph, union ccb *ccb)
992 struct sbp_target *target;
993 struct sbp_dev *sdev;
995 sdev = (struct sbp_dev *) ccb->ccb_h.ccb_sdev_ptr;
996 target = sdev->target;
997 SBP_LOCK_ASSERT(target->sbp);
999 device_printf(sdev->target->sbp->fd.dev,
1000 "%s:%s\n", __func__, sdev->bustgtlun);
1002 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
1003 sdev->status = SBP_DEV_ATTACHED;
1005 device_printf(sdev->target->sbp->fd.dev,
1006 "%s:%s failed\n", __func__, sdev->bustgtlun);
/* Chain the scan to the next LUN awaiting probe, if any. */
1008 sdev = sbp_next_dev(target, sdev->lun_id + 1);
1014 xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
1015 ccb->ccb_h.ccb_sdev_ptr = sdev;
1017 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
/*
 * Callout handler: start a CAM XPT_SCAN_LUN for the first LUN of the
 * target that is in SBP_DEV_PROBE state; completions chain through
 * sbp_cam_scan_lun(). Softc mutex held (asserted).
 */
1022 sbp_cam_scan_target(void *arg)
1024 struct sbp_target *target = (struct sbp_target *)arg;
1025 struct sbp_dev *sdev;
1028 SBP_LOCK_ASSERT(target->sbp);
1029 sdev = sbp_next_dev(target, 0);
1031 printf("sbp_cam_scan_target: nothing to do for target%d\n",
1036 device_printf(sdev->target->sbp->fd.dev,
1037 "%s:%s\n", __func__, sdev->bustgtlun);
1039 ccb = malloc(sizeof(union ccb), M_SBP, M_NOWAIT | M_ZERO);
1041 printf("sbp_cam_scan_target: malloc failed\n");
1044 xpt_setup_ccb(&ccb->ccb_h, sdev->path, SCAN_PRI);
1045 ccb->ccb_h.func_code = XPT_SCAN_LUN;
1046 ccb->ccb_h.cbfcnp = sbp_cam_scan_lun;
1047 ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1048 ccb->crcn.flags = CAM_FLAG_NONE;
1049 ccb->ccb_h.ccb_sdev_ptr = sdev;
1051 /* The scan is in progress now. */
1053 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
/*
 * Mark the LUN for probing and (re)schedule the per-target scan
 * callout to fire after scan_delay milliseconds.
 */
1057 static __inline void
1058 sbp_scan_dev(struct sbp_dev *sdev)
1060 sdev->status = SBP_DEV_PROBE;
1061 callout_reset_sbt(&sdev->target->scan_callout, SBT_1MS * scan_delay, 0,
1062 sbp_cam_scan_target, (void *)sdev->target, 0);
/*
 * Final attach step after login/agent-reset: create the CAM path for
 * this LUN (if not yet present) and hand the LUN to CAM for scanning.
 */
1066 sbp_do_attach(struct fw_xfer *xfer)
1068 struct sbp_dev *sdev;
1069 struct sbp_target *target;
1070 struct sbp_softc *sbp;
1072 sdev = (struct sbp_dev *)xfer->sc;
1073 target = sdev->target;
1077 device_printf(sdev->target->sbp->fd.dev,
1078 "%s:%s\n", __func__, sdev->bustgtlun);
1080 sbp_xfer_free(xfer);
1082 if (sdev->path == NULL)
1083 xpt_create_path(&sdev->path, NULL,
1084 cam_sim_path(target->sbp->sim),
1085 target->target_id, sdev->lun_id);
1088 * Let CAM scan the bus if we are in the boot process.
1089 * XXX xpt_scan_bus cannot detect LUN larger than 0
1090 * if LUN 0 doesn't exist.
1093 sdev->status = SBP_DEV_ATTACHED;
/*
 * Completion of the AGENT_RESET register write: free the xfer and
 * release the device queue frozen while the reset was pending.
 */
1103 sbp_agent_reset_callback(struct fw_xfer *xfer)
1105 struct sbp_dev *sdev;
1107 sdev = (struct sbp_dev *)xfer->sc;
1109 device_printf(sdev->target->sbp->fd.dev,
1110 "%s:%s\n", __func__, sdev->bustgtlun);
1112 if (xfer->resp != 0) {
1113 device_printf(sdev->target->sbp->fd.dev,
1114 "%s:%s resp=%d\n", __func__, sdev->bustgtlun, xfer->resp);
1117 SBP_LOCK(sdev->target->sbp);
1118 sbp_xfer_free(xfer);
1120 xpt_release_devq(sdev->path, sdev->freeze, TRUE);
1123 SBP_UNLOCK(sdev->target->sbp);
/*
 * Write the fetch agent's AGENT_RESET register (command-agent CSR
 * offset 0x04) and abort every outstanding OCB with CAM_BDR_SENT.
 * For LUNs not yet attached/probed, completion chains to
 * sbp_do_attach() instead of the plain reset callback.
 */
1127 sbp_agent_reset(struct sbp_dev *sdev)
1129 struct fw_xfer *xfer;
1132 SBP_LOCK_ASSERT(sdev->target->sbp);
1134 device_printf(sdev->target->sbp->fd.dev,
1135 "%s:%s\n", __func__, sdev->bustgtlun);
1137 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04);
1140 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE)
1141 xfer->hand = sbp_agent_reset_callback;
1143 xfer->hand = sbp_do_attach;
1144 fp = &xfer->send.hdr;
1145 fp->mode.wreqq.data = htonl(0xf);
1146 fw_asyreq(xfer->fc, -1, xfer);
1147 sbp_abort_all_ocbs(sdev, CAM_BDR_SENT);
/*
 * Completion of the BUSY_TIMEOUT register write: follow up with a
 * fetch-agent reset.
 */
1151 sbp_busy_timeout_callback(struct fw_xfer *xfer)
1153 struct sbp_dev *sdev;
1155 sdev = (struct sbp_dev *)xfer->sc;
1157 device_printf(sdev->target->sbp->fd.dev,
1158 "%s:%s\n", __func__, sdev->bustgtlun);
1160 SBP_LOCK(sdev->target->sbp);
1161 sbp_xfer_free(xfer);
1162 sbp_agent_reset(sdev);
1163 SBP_UNLOCK(sdev->target->sbp);
/*
 * Program the unit's BUSY_TIMEOUT CSR register (retry limit and
 * retry period fields); completion chains to sbp_agent_reset().
 */
1167 sbp_busy_timeout(struct sbp_dev *sdev)
1170 struct fw_xfer *xfer;
1172 device_printf(sdev->target->sbp->fd.dev,
1173 "%s:%s\n", __func__, sdev->bustgtlun);
1175 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1177 xfer->hand = sbp_busy_timeout_callback;
1178 fp = &xfer->send.hdr;
1179 fp->mode.wreqq.dest_hi = 0xffff;
1180 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
1181 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
1182 fw_asyreq(xfer->fc, -1, xfer);
/*
 * Completion of an ORB_POINTER register write: clear the ACTIVE flag
 * and, if another pointer write was requested meanwhile
 * (ORB_POINTER_NEED), issue it for the head of the OCB queue.
 */
1186 sbp_orb_pointer_callback(struct fw_xfer *xfer)
1188 struct sbp_dev *sdev;
1189 sdev = (struct sbp_dev *)xfer->sc;
1192 device_printf(sdev->target->sbp->fd.dev,
1193 "%s:%s\n", __func__, sdev->bustgtlun);
1195 if (xfer->resp != 0) {
1197 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1199 SBP_LOCK(sdev->target->sbp);
1200 sbp_xfer_free(xfer);
1202 sdev->flags &= ~ORB_POINTER_ACTIVE;
1204 if ((sdev->flags & ORB_POINTER_NEED) != 0) {
1205 struct sbp_ocb *ocb;
1207 sdev->flags &= ~ORB_POINTER_NEED;
1208 ocb = STAILQ_FIRST(&sdev->ocbs);
1210 sbp_orb_pointer(sdev, ocb);
1212 SBP_UNLOCK(sdev->target->sbp);
/*
 * Write the fetch agent's ORB_POINTER register (command-agent CSR
 * offset 0x08) with the node id and bus address of the given OCB's
 * ORB. Serialized via ORB_POINTER_ACTIVE; a request arriving while
 * one is in flight just sets ORB_POINTER_NEED for the callback.
 */
1217 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb)
1219 struct fw_xfer *xfer;
1222 device_printf(sdev->target->sbp->fd.dev,
1224 __func__, sdev->bustgtlun,
1225 (uint32_t)ocb->bus_addr);
1228 SBP_LOCK_ASSERT(sdev->target->sbp);
1230 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) {
1232 printf("%s: orb pointer active\n", __func__);
1234 sdev->flags |= ORB_POINTER_NEED;
1238 sdev->flags |= ORB_POINTER_ACTIVE;
1239 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08);
1242 xfer->hand = sbp_orb_pointer_callback;
1244 fp = &xfer->send.hdr;
1245 fp->mode.wreqb.len = 8;
1246 fp->mode.wreqb.extcode = 0;
/* Payload: {node id in upper quadlet, ORB bus address in lower}. */
1247 xfer->send.payload[0] =
1248 htonl(((sdev->target->sbp->fd.fc->nodeid | FWLOCALBUS )<< 16));
1249 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
1251 if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1252 sbp_xfer_free(xfer);
1253 ocb->ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * Completion of a DOORBELL register write: clear ACTIVE and replay a
 * deferred doorbell request if one was flagged (ORB_DOORBELL_NEED).
 */
1259 sbp_doorbell_callback(struct fw_xfer *xfer)
1261 struct sbp_dev *sdev;
1262 sdev = (struct sbp_dev *)xfer->sc;
1265 device_printf(sdev->target->sbp->fd.dev,
1266 "%s:%s\n", __func__, sdev->bustgtlun);
1268 if (xfer->resp != 0) {
1270 device_printf(sdev->target->sbp->fd.dev,
1271 "%s: xfer->resp = %d\n", __func__, xfer->resp);
1273 SBP_LOCK(sdev->target->sbp);
1274 sbp_xfer_free(xfer);
1275 sdev->flags &= ~ORB_DOORBELL_ACTIVE;
1276 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
1277 sdev->flags &= ~ORB_DOORBELL_NEED;
1280 SBP_UNLOCK(sdev->target->sbp);
/*
 * Ring the fetch agent's DOORBELL register (command-agent CSR offset
 * 0x10) so it re-reads the linked ORB list. Serialized with the
 * ORB_DOORBELL_ACTIVE/NEED flag pair like sbp_orb_pointer().
 */
1284 sbp_doorbell(struct sbp_dev *sdev)
1286 struct fw_xfer *xfer;
1289 device_printf(sdev->target->sbp->fd.dev,
1290 "%s:%s\n", __func__, sdev->bustgtlun);
1293 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
1294 sdev->flags |= ORB_DOORBELL_NEED;
1297 sdev->flags |= ORB_DOORBELL_ACTIVE;
1298 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10);
1301 xfer->hand = sbp_doorbell_callback;
1302 fp = &xfer->send.hdr;
1303 fp->mode.wreqq.data = htonl(0xf);
1304 fw_asyreq(xfer->fc, -1, xfer);
/*
 * Get a fw_xfer for a write request to the LUN's command agent:
 * reuse one from the target's free list or allocate a new one (capped
 * at ~5 per target), then pre-fill the request header so that the
 * destination is the command-agent CSR base plus 'offset'.
 * Caller must hold the softc mutex (asserted).
 */
1307 static struct fw_xfer *
1308 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
1310 struct fw_xfer *xfer;
1312 struct sbp_target *target;
1315 SBP_LOCK_ASSERT(sdev->target->sbp);
1317 target = sdev->target;
1318 xfer = STAILQ_FIRST(&target->xferlist);
1320 if (target->n_xfer > 5 /* XXX */) {
1321 printf("sbp: no more xfer for this target\n");
1324 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
1326 printf("sbp: fw_xfer_alloc_buf failed\n");
1331 printf("sbp: alloc %d xfer\n", target->n_xfer);
1334 STAILQ_REMOVE_HEAD(&target->xferlist, link);
1338 xfer->recv.pay_len = 0;
/* Cap the speed at the sysctl-tunable maximum. */
1339 xfer->send.spd = min(sdev->target->fwdev->speed, max_speed);
1340 xfer->fc = sdev->target->sbp->fd.fc;
/* Block writes carry an 8-byte payload; quadlet writes carry none. */
1343 if (tcode == FWTCODE_WREQB)
1344 xfer->send.pay_len = 8;
1346 xfer->send.pay_len = 0;
1348 xfer->sc = (caddr_t)sdev;
1349 fp = &xfer->send.hdr;
1350 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
1351 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
1352 fp->mode.wreqq.tlrt = 0;
1353 fp->mode.wreqq.tcode = tcode;
1354 fp->mode.wreqq.pri = 0;
1355 fp->mode.wreqq.dst = FWLOCALBUS | sdev->target->fwdev->dst;
/*
 * Build and send a management ORB (login, logout, reconnect, abort
 * task set, ...) to the target's management agent. Only one
 * management ORB may be outstanding per target; additional requests
 * are queued on mgm_ocb_queue and replayed via ORB_FUN_RUNQUEUE.
 * Softc mutex held (asserted).
 */
1361 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
1363 struct fw_xfer *xfer;
1365 struct sbp_ocb *ocb;
1366 struct sbp_target *target;
1369 target = sdev->target;
1370 nid = target->sbp->fd.fc->nodeid | FWLOCALBUS;
1372 SBP_LOCK_ASSERT(target->sbp);
/* RUNQUEUE: pull the next queued management ORB, if idle. */
1373 if (func == ORB_FUN_RUNQUEUE) {
1374 ocb = STAILQ_FIRST(&target->mgm_ocb_queue);
1375 if (target->mgm_ocb_cur != NULL || ocb == NULL) {
1378 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb);
1381 if ((ocb = sbp_get_ocb(sdev)) == NULL) {
1385 ocb->flags = OCB_ACT_MGM;
1388 bzero((void *)ocb->orb, sizeof(ocb->orb));
/* Status FIFO address: node id + SBP_DEV2ADDR-encoded target/lun. */
1389 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI);
1390 ocb->orb[7] = htonl(SBP_DEV2ADDR(target->target_id, sdev->lun_id));
1393 device_printf(sdev->target->sbp->fd.dev,
1395 __func__,sdev->bustgtlun,
1396 orb_fun_name[(func>>16)&0xf]);
/* LOGIN ORB: response buffer is the per-LUN DMA login area. */
1400 ocb->orb[0] = ocb->orb[1] = 0; /* password */
1401 ocb->orb[2] = htonl(nid << 16);
1402 ocb->orb[3] = htonl(sdev->dma.bus_addr);
1403 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id);
1405 ocb->orb[4] |= htonl(ORB_EXV);
1406 ocb->orb[5] = htonl(SBP_LOGIN_SIZE);
1407 fwdma_sync(&sdev->dma, BUS_DMASYNC_PREREAD);
/* Task-management functions reference the ORB being aborted (aocb). */
1410 ocb->orb[0] = htonl((0 << 16) | 0);
1411 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff);
1418 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id);
1422 if (target->mgm_ocb_cur != NULL) {
1423 /* there is a standing ORB */
1424 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb);
1428 target->mgm_ocb_cur = ocb;
1430 callout_reset(&target->mgm_ocb_timeout, 5 * hz,
1431 sbp_mgm_timeout, (caddr_t)ocb);
1432 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
1436 xfer->hand = sbp_mgm_callback;
/* Block write of the ORB pointer to the management agent's CSR. */
1438 fp = &xfer->send.hdr;
1439 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi;
1440 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo;
1441 fp->mode.wreqb.len = 8;
1442 fp->mode.wreqb.extcode = 0;
1443 xfer->send.payload[0] = htonl(nid << 16);
1444 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff);
1446 fw_asyreq(xfer->fc, -1, xfer);
/*
 * Debug helper: dump the CAM SCSI CCB attached to ocb — the first ten
 * CDB bytes, transfer direction, and the CDB/data/sense lengths.
 * Note: prints cdb_bytes unconditionally; CAM_CDB_POINTER CCBs would
 * show the pointer bytes, not the CDB (debug output only).
 */
1450 sbp_print_scsi_cmd(struct sbp_ocb *ocb)
1452 	struct ccb_scsiio *csio;
1454 	csio = &ocb->ccb->csio;
1455 	printf("%s:%d:%d XPT_SCSI_IO: "
1456 	    "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x"
1458 	    "%db cmd/%db data/%db sense\n",
1459 	    device_get_nameunit(ocb->sdev->target->sbp->fd.dev),
1460 	    ocb->ccb->ccb_h.target_id, ocb->ccb->ccb_h.target_lun,
1461 	    csio->cdb_io.cdb_bytes[0],
1462 	    csio->cdb_io.cdb_bytes[1],
1463 	    csio->cdb_io.cdb_bytes[2],
1464 	    csio->cdb_io.cdb_bytes[3],
1465 	    csio->cdb_io.cdb_bytes[4],
1466 	    csio->cdb_io.cdb_bytes[5],
1467 	    csio->cdb_io.cdb_bytes[6],
1468 	    csio->cdb_io.cdb_bytes[7],
1469 	    csio->cdb_io.cdb_bytes[8],
1470 	    csio->cdb_io.cdb_bytes[9],
1471 	    ocb->ccb->ccb_h.flags & CAM_DIR_MASK,
1472 	    csio->cdb_len, csio->dxfer_len,
/*
 * Translate the SBP-2 command status block returned by the target into
 * CAM fixed-format SCSI sense data in the CCB.  For CHECK CONDITION /
 * BUSY / COMMAND TERMINATED the sense key, ASC/ASCQ, info, FRU and
 * key-specific bytes are copied into the csio sense buffer and the CCB
 * is completed with CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; other
 * status values are only logged.
 */
1477 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb)
1479 	struct sbp_cmd_status *sbp_cmd_status;
1480 	struct scsi_sense_data_fixed *sense;
1482 	sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data;
1483 	sense = (struct scsi_sense_data_fixed *)&ocb->ccb->csio.sense_data;
1486 	sbp_print_scsi_cmd(ocb);
1487 	/* XXX need decode status */
1488 	printf("%s: SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n",
1489 	    ocb->sdev->bustgtlun,
1490 	    sbp_cmd_status->status,
1491 	    sbp_cmd_status->sfmt,
1492 	    sbp_cmd_status->valid,
1493 	    sbp_cmd_status->s_key,
1494 	    sbp_cmd_status->s_code,
1495 	    sbp_cmd_status->s_qlfr,
1499 	switch (sbp_cmd_status->status) {
1500 	case SCSI_STATUS_CHECK_COND:
1501 	case SCSI_STATUS_BUSY:
1502 	case SCSI_STATUS_CMD_TERMINATED:
/* Current vs. deferred error per the SBP-2 sense format bit. */
1503 		if(sbp_cmd_status->sfmt == SBP_SFMT_CURR){
1504 			sense->error_code = SSD_CURRENT_ERROR;
1506 			sense->error_code = SSD_DEFERRED_ERROR;
1508 		if(sbp_cmd_status->valid)
1509 			sense->error_code |= SSD_ERRCODE_VALID;
1510 		sense->flags = sbp_cmd_status->s_key;
1511 		if(sbp_cmd_status->mark)
1512 			sense->flags |= SSD_FILEMARK;
1513 		if(sbp_cmd_status->eom)
1514 			sense->flags |= SSD_EOM;
1515 		if(sbp_cmd_status->ill_len)
1516 			sense->flags |= SSD_ILI;
1518 		bcopy(&sbp_cmd_status->info, &sense->info[0], 4);
/*
 * extra_len reflects how much of the status block the target actually
 * returned (sbp_status->len is in quadlet-group units).
 */
1520 		if (sbp_status->len <= 1)
1521 			/* XXX not scsi status. shouldn't be happened */
1522 			sense->extra_len = 0;
1523 		else if (sbp_status->len <= 4)
1524 			/* add_sense_code(_qual), info, cmd_spec_info */
1525 			sense->extra_len = 6;
1527 			/* fru, sense_key_spec */
1528 			sense->extra_len = 10;
1530 		bcopy(&sbp_cmd_status->cdb, &sense->cmd_spec_info[0], 4);
1532 		sense->add_sense_code = sbp_cmd_status->s_code;
1533 		sense->add_sense_code_qual = sbp_cmd_status->s_qlfr;
1534 		sense->fru = sbp_cmd_status->fru;
1536 		bcopy(&sbp_cmd_status->s_keydep[0],
1537 		    &sense->sense_key_spec[0], 3);
1539 		ocb->ccb->csio.scsi_status = sbp_cmd_status->status;
1540 		ocb->ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
1541 		    | CAM_AUTOSNS_VALID;
/* Debug hex dump of the raw sense bytes (tmp presumably aliases sense). */
1546 		for( j = 0 ; j < 32 ; j+=8){
1547 			printf("sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
1548 			    tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
1549 			    tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
1556 		device_printf(ocb->sdev->target->sbp->fd.dev,
1557 		    "%s:%s unknown scsi status 0x%x\n",
1558 		    __func__, ocb->sdev->bustgtlun,
1559 		    sbp_cmd_status->status);
/*
 * Post-process INQUIRY response data for quirky SBP-2 bridges:
 * skips EVPD inquiries, converts T_DIRECT devices to T_RBC, overrides
 * the vendor/product/revision strings with the values read from the
 * device's config ROM, and forces tagged queuing on/off according to
 * the sbp_tags tunable.
 */
1564 sbp_fix_inq_data(struct sbp_ocb *ocb)
1567 	struct sbp_dev *sdev;
1568 	struct scsi_inquiry_data *inq;
/* Vital-product-data pages are left untouched. */
1573 	if (ccb->csio.cdb_io.cdb_bytes[1] & SI_EVPD)
1576 	device_printf(sdev->target->sbp->fd.dev,
1577 	    "%s:%s\n", __func__, sdev->bustgtlun);
1579 	inq = (struct scsi_inquiry_data *) ccb->csio.data_ptr;
1580 	switch (SID_TYPE(inq)) {
1584 		 * XXX Convert Direct Access device to RBC.
1585 		 * I've never seen FireWire DA devices which support READ_6.
1587 		if (SID_TYPE(inq) == T_DIRECT)
1588 			inq->device |= T_RBC; /* T_DIRECT == 0 */
1593 		 * Override vendor/product/revision information.
1594 		 * Some devices sometimes return strange strings.
1597 		bcopy(sdev->vendor, inq->vendor, sizeof(inq->vendor));
1598 		bcopy(sdev->product, inq->product, sizeof(inq->product));
1599 		bcopy(sdev->revision+2, inq->revision, sizeof(inq->revision));
1604 	 * Force to enable/disable tagged queuing.
1605 	 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
1608 		inq->flags |= SID_CmdQue;
1609 	else if (sbp_tags < 0)
1610 		inq->flags &= ~SID_CmdQue;
/*
 * Handle one inbound status-FIFO write from an SBP-2 target.  This is
 * the heart of the driver's completion path:
 *  - validates the transfer (resp code, payload, tcode must be WREQB),
 *  - decodes the destination address into target/LUN and finds sdev,
 *  - matches the status block to the outstanding management ORB or to a
 *    queued command ORB,
 *  - for management ORBs, completes login/reconnect/logout state
 *    transitions (byte-swapping the login response in place),
 *  - for command ORBs, converts status into CAM ccb_h.status (calling
 *    sbp_scsi_status() for SCSI status, sbp_fix_inq_data() for INQUIRY),
 *  - resets the fetch agent when the target reports it dead,
 *  - finally sends a write response packet back and requeues the
 *    receive xfer on sbp->fwb.xferlist.
 * Runs with the sbp softc lock held.
 */
1615 sbp_recv1(struct fw_xfer *xfer)
1621 	struct sbp_softc *sbp;
1622 	struct sbp_dev *sdev;
1623 	struct sbp_ocb *ocb;
1624 	struct sbp_login_res *login_res = NULL;
1625 	struct sbp_status *sbp_status;
1626 	struct sbp_target *target;
1627 	int	orb_fun, status_valid0, status_valid, t, l, reset_agent = 0;
1631 	ld = xfer->recv.buf;
1632 	printf("sbp %x %d %d %08x %08x %08x %08x\n",
1633 	    xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
1634 	printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
1635 	printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
1637 	sbp = (struct sbp_softc *)xfer->sc;
1638 	SBP_LOCK_ASSERT(sbp);
/* Sanity checks: drop malformed or failed inbound transfers. */
1639 	if (xfer->resp != 0){
1640 		printf("sbp_recv: xfer->resp = %d\n", xfer->resp);
1643 	if (xfer->recv.payload == NULL){
1644 		printf("sbp_recv: xfer->recv.payload == NULL\n");
1647 	rfp = &xfer->recv.hdr;
1648 	if(rfp->mode.wreqb.tcode != FWTCODE_WREQB){
1649 		printf("sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
1652 	sbp_status = (struct sbp_status *)xfer->recv.payload;
1653 	addr = rfp->mode.wreqb.dest_lo;
1655 	printf("received address 0x%x\n", addr);
/* The low address bits encode which target/LUN this status is for. */
1657 	t = SBP_ADDR2TRG(addr);
1658 	if (t >= SBP_NUM_TARGETS) {
1659 		device_printf(sbp->fd.dev,
1660 			"sbp_recv1: invalid target %d\n", t);
1663 	target = &sbp->targets[t];
1664 	l = SBP_ADDR2LUN(addr);
1665 	if (l >= target->num_lun || target->luns[l] == NULL) {
1666 		device_printf(sbp->fd.dev,
1667 			"sbp_recv1: invalid lun %d (target=%d)\n", l, t);
1670 	sdev = target->luns[l];
1673 	switch (sbp_status->src) {
1676 		/* check mgm_ocb_cur first */
1677 		ocb  = target->mgm_ocb_cur;
1679 			if (OCB_MATCH(ocb, sbp_status)) {
1680 				callout_stop(&target->mgm_ocb_timeout);
1681 				target->mgm_ocb_cur = NULL;
1685 		ocb = sbp_dequeue_ocb(sdev, sbp_status);
1687 			device_printf(sdev->target->sbp->fd.dev,
1688 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1689 				"%s:%s No ocb(%lx) on the queue\n",
1691 				"%s:%s No ocb(%x) on the queue\n",
1693 				__func__,sdev->bustgtlun,
1694 				ntohl(sbp_status->orb_lo));
1699 		/* unsolicit */
1699 		device_printf(sdev->target->sbp->fd.dev,
1700 		    "%s:%s unsolicit status received\n",
1701 		    __func__, sdev->bustgtlun);
1704 		device_printf(sdev->target->sbp->fd.dev,
1705 		    "%s:%s unknown sbp_status->src\n",
1706 		    __func__, sdev->bustgtlun);
/* A status block is usable only if resp==CMPL and the agent isn't dead. */
1709 	status_valid0 = (sbp_status->src < 2
1710 	    && sbp_status->resp == ORB_RES_CMPL
1711 	    && sbp_status->dead == 0);
1712 	status_valid = (status_valid0 && sbp_status->status == 0);
1714 	if (!status_valid0 || debug > 2){
1717 		device_printf(sdev->target->sbp->fd.dev,
1718 		    "%s:%s ORB status src:%x resp:%x dead:%x"
1719 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1720 		    " len:%x stat:%x orb:%x%08lx\n",
1722 		    " len:%x stat:%x orb:%x%08x\n",
1724 		    __func__, sdev->bustgtlun,
1725 		    sbp_status->src, sbp_status->resp, sbp_status->dead,
1726 		    sbp_status->len, sbp_status->status,
1727 		    ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
1729 		device_printf(sdev->target->sbp->fd.dev,
1730 		    "%s\n", sdev->bustgtlun);
1731 		status = sbp_status->status;
1732 		switch(sbp_status->resp) {
1734 			if (status > MAX_ORB_STATUS0)
1735 				printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
1737 				printf("%s\n", orb_status0[status]);
1740 			printf("Obj: %s, Error: %s\n",
1741 			    orb_status1_object[(status>>6) & 3],
1742 			    orb_status1_serial_bus_error[status & 0xf]);
1745 			printf("Illegal request\n");
1748 			printf("Vendor dependent\n");
1751 			printf("unknown respose code %d\n", sbp_status->resp);
1755 	/* we have to reset the fetch agent if it's dead */
1756 	if (sbp_status->dead) {
1758 			xpt_freeze_devq(sdev->path, 1);
1767 	switch(ntohl(ocb->orb[4]) & ORB_FMT_MSK){
1773 		switch(ocb->flags) {
1775 			orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
/*
 * Login response buffer was DMA-written by the target in big-endian;
 * convert it in place before using the fields.
 */
1779 				fwdma_sync(&sdev->dma, BUS_DMASYNC_POSTREAD);
1780 				login_res = sdev->login;
1781 				login_res->len = ntohs(login_res->len);
1782 				login_res->id = ntohs(login_res->id);
1783 				login_res->cmd_hi = ntohs(login_res->cmd_hi);
1784 				login_res->cmd_lo = ntohl(login_res->cmd_lo);
1787 					device_printf(sdev->target->sbp->fd.dev,
1788 					    "%s:%s login: len %d, ID %d, cmd %08x%08x, recon_hold %d\n",
1789 					    __func__, sdev->bustgtlun,
1790 					    login_res->len, login_res->id,
1791 					    login_res->cmd_hi, login_res->cmd_lo,
1792 					    ntohs(login_res->recon_hold));
1794 				sbp_busy_timeout(sdev);
1796 				/* forgot logout? */
1797 				device_printf(sdev->target->sbp->fd.dev,
1798 				    "%s:%s login failed\n",
1799 				    __func__, sdev->bustgtlun);
1800 				sdev->status = SBP_DEV_RESET;
1804 			login_res = sdev->login;
1807 				device_printf(sdev->target->sbp->fd.dev,
1808 				    "%s:%s reconnect: len %d, ID %d, cmd %08x%08x\n",
1809 				    __func__, sdev->bustgtlun,
1810 				    login_res->len, login_res->id,
1811 				    login_res->cmd_hi, login_res->cmd_lo);
1813 				if (sdev->status == SBP_DEV_ATTACHED)
1816 				sbp_agent_reset(sdev);
1818 				/* reconnection hold time exceed? */
1820 				device_printf(sdev->target->sbp->fd.dev,
1821 				    "%s:%s reconnect failed\n",
1822 				    __func__, sdev->bustgtlun);
1828 			sdev->status = SBP_DEV_RESET;
1831 			sbp_busy_timeout(sdev);
1836 			sbp_agent_reset(sdev);
1839 			device_printf(sdev->target->sbp->fd.dev,
1840 			    "%s:%s unknown function %d\n",
1841 			    __func__, sdev->bustgtlun, orb_fun);
/* Kick the next queued management ORB, if any. */
1844 		sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
1848 		if(ocb->ccb != NULL){
/* Command ORB completion: map SBP status onto the CAM CCB. */
1852 			if(sbp_status->len > 1){
1853 				sbp_scsi_status(sbp_status, ocb);
1855 				if(sbp_status->resp != ORB_RES_CMPL){
1856 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1858 					ccb->ccb_h.status = CAM_REQ_CMP;
1861 			/* fix up inq data */
1862 			if (ccb->csio.cdb_io.cdb_bytes[0] == INQUIRY)
1863 				sbp_fix_inq_data(ocb);
1873 		sbp_free_ocb(sdev, ocb);
1876 		sbp_agent_reset(sdev);
1879 	xfer->recv.pay_len = SBP_RECV_LEN;
1880 	/* The received packet is usually small enough to be stored within
1881 	 * the buffer. In that case, the controller return ack_complete and
1882 	 * no respose is necessary.
1884 	 * XXX fwohci.c and firewire.c should inform event_code such as
1885 	 * ack_complete or ack_pending to upper driver.
/* Send the FireWire write-response acknowledging the status write. */
1889 	sfp = (struct fw_pkt *)xfer->send.buf;
1890 	sfp->mode.wres.dst = rfp->mode.wreqb.src;
1891 	xfer->dst = sfp->mode.wres.dst;
1892 	xfer->spd = min(sdev->target->fwdev->speed, max_speed);
1893 	xfer->hand = sbp_loginres_callback;
1895 	sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
1896 	sfp->mode.wres.tcode = FWTCODE_WRES;
1897 	sfp->mode.wres.rtcode = 0;
1898 	sfp->mode.wres.pri = 0;
1900 	fw_asyreq(xfer->fc, -1, xfer);
/* Recycle the receive buffer for the next status-FIFO write. */
1903 	STAILQ_INSERT_TAIL(&sbp->fwb.xferlist, xfer, link);
/*
 * Receive entry point registered with the firewire layer; presumably
 * takes the softc lock and hands the xfer to sbp_recv1() — TODO confirm
 * (body elided in this view).
 */
1908 sbp_recv(struct fw_xfer *xfer)
1910 	struct sbp_softc *sbp;
1912 	sbp = (struct sbp_softc *)xfer->sc;
/*
 * Newbus attach: set up the SBP-2 SIM on top of a firewire bus.
 * Creates the softc mutex, a busdma tag for data transfers, a CAM SIM
 * and wildcard path, binds the status-FIFO address window with
 * pre-allocated receive xfers, installs bus-reset/explore hooks, and
 * kicks off an initial bus scan if the bus is already up.
 */
1921 sbp_attach(device_t dev)
1923 	struct sbp_softc *sbp;
1924 	struct cam_devq *devq;
1925 	struct firewire_comm *fc;
1928 	if (DFLTPHYS > SBP_MAXPHYS)
1929 		device_printf(dev, "Warning, DFLTPHYS(%dKB) is larger than "
1930 		    "SBP_MAXPHYS(%dKB).\n", DFLTPHYS / 1024,
1931 		    SBP_MAXPHYS / 1024);
1933 	if (!firewire_phydma_enable)
1934 		device_printf(dev, "Warning, hw.firewire.phydma_enable must be 1 "
1935 		    "for SBP over FireWire.\n");
1937 	printf("sbp_attach (cold=%d)\n", cold);
1942 	sbp = device_get_softc(dev);
1944 	sbp->fd.fc = fc = device_get_ivars(dev);
1945 	mtx_init(&sbp->mtx, "sbp", NULL, MTX_DEF);
1948 		max_speed = fc->speed;
/* DMA tag for SCSI data: 32-bit addresses, SBP_IND_MAX page-table segs. */
1950 	error = bus_dma_tag_create(/*parent*/fc->dmat,
1951 	    /* XXX shoud be 4 for sane backend? */
1954 	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
1955 	    /*highaddr*/BUS_SPACE_MAXADDR,
1956 	    /*filter*/NULL, /*filterarg*/NULL,
1957 	    /*maxsize*/0x100000, /*nsegments*/SBP_IND_MAX,
1958 	    /*maxsegsz*/SBP_SEG_MAX,
1959 	    /*flags*/BUS_DMA_ALLOCNOW,
1960 #if defined(__FreeBSD__) && __FreeBSD_version >= 501102
1961 	    /*lockfunc*/busdma_lock_mutex,
1962 	    /*lockarg*/&sbp->mtx,
1966 		printf("sbp_attach: Could not allocate DMA tag "
1967 		    "- error %d\n", error);
1971 	devq = cam_simq_alloc(/*maxopenings*/SBP_NUM_OCB);
1975 	for( i = 0 ; i < SBP_NUM_TARGETS ; i++){
1976 		sbp->targets[i].fwdev = NULL;
1977 		sbp->targets[i].luns = NULL;
1980 	sbp->sim = cam_sim_alloc(sbp_action, sbp_poll, "sbp", sbp,
1981 	    device_get_unit(dev),
1984 	    /*tagged*/ SBP_QUEUE_LEN - 1,
1987 	if (sbp->sim == NULL) {
1988 		cam_simq_free(devq);
1993 	if (xpt_bus_register(sbp->sim, dev, /*bus*/0) != CAM_SUCCESS)
1996 	if (xpt_create_path(&sbp->path, NULL, cam_sim_path(sbp->sim),
1997 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1998 		xpt_bus_deregister(cam_sim_path(sbp->sim));
2003 	/* We reserve 16 bit space (4 bytes X 64 targets X 256 luns) */
2004 	sbp->fwb.start = ((u_int64_t)SBP_BIND_HI << 32) | SBP_DEV2ADDR(0, 0);
2005 	sbp->fwb.end = sbp->fwb.start + 0xffff;
2006 	/* pre-allocate xfer */
2007 	STAILQ_INIT(&sbp->fwb.xferlist);
2008 	fw_xferlist_add(&sbp->fwb.xferlist, M_SBP,
2009 	    /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB/2,
2010 	    fc, (void *)sbp, sbp_recv);
2012 	fw_bindadd(fc, &sbp->fwb);
2014 	sbp->fd.post_busreset = sbp_post_busreset;
2015 	sbp->fd.post_explore = sbp_post_explore;
/* Bus already explored (e.g. module loaded late): scan immediately. */
2017 	if (fc->status != -1) {
2018 		sbp_post_busreset((void *)sbp);
2019 		sbp_post_explore((void *)sbp);
2022 	xpt_async(AC_BUS_RESET, sbp->path, /*arg*/ NULL);
2028 	cam_sim_free(sbp->sim, /*free_devq*/TRUE);
/*
 * Send a LOGOUT management ORB to every attached (or attaching) LUN of
 * every target, cancelling each device's pending login callout first.
 * Caller must hold the sbp softc lock.
 */
2033 sbp_logout_all(struct sbp_softc *sbp)
2035 	struct sbp_target *target;
2036 	struct sbp_dev *sdev;
2040 	printf("sbp_logout_all\n");
2042 	SBP_LOCK_ASSERT(sbp);
2043 	for (i = 0 ; i < SBP_NUM_TARGETS ; i ++) {
2044 		target = &sbp->targets[i];
2045 		if (target->luns == NULL)
2047 		for (j = 0; j < target->num_lun; j++) {
2048 			sdev = target->luns[j];
2051 				callout_stop(&sdev->login_callout);
2052 				if (sdev->status >= SBP_DEV_TOATTACH &&
2053 				    sdev->status <= SBP_DEV_ATTACHED)
2054 					sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
/* Newbus shutdown hook: log out of all targets before the system halts. */
2062 sbp_shutdown(device_t dev)
2064 	struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));
2067 	sbp_logout_all(sbp);
/*
 * Release all resources owned by one LUN device: drain its login and
 * per-OCB timeout callouts, destroy the OCB DMA maps, and free the
 * login/ORB DMA buffer.
 */
2073 sbp_free_sdev(struct sbp_dev *sdev)
2075 	struct sbp_softc *sbp;
2080 	sbp = sdev->target->sbp;
2082 	callout_drain(&sdev->login_callout);
2083 	for (i = 0; i < SBP_QUEUE_LEN; i++) {
2084 		callout_drain(&sdev->ocb[i].timer);
2085 		bus_dmamap_destroy(sbp->dmat, sdev->ocb[i].dmamap);
2087 	fwdma_free(sbp->fd.fc, &sdev->dma);
/*
 * Tear down a target: drain its management/scan callouts, free every
 * LUN via sbp_free_sdev(), release the pre-allocated write-command
 * xfers, and reset the target slot for reuse.  No-op if the target has
 * no LUN array.  Caller must hold the sbp softc lock.
 */
2093 sbp_free_target(struct sbp_target *target)
2095 	struct sbp_softc *sbp;
2096 	struct fw_xfer *xfer, *next;
2099 	if (target->luns == NULL)
2102 	SBP_LOCK_ASSERT(sbp);
2104 	callout_drain(&target->mgm_ocb_timeout);
2105 	callout_drain(&target->scan_callout);
2107 	for (i = 0; i < target->num_lun; i++)
2108 		sbp_free_sdev(target->luns[i]);
2110 	STAILQ_FOREACH_SAFE(xfer, &target->xferlist, link, next) {
2111 		fw_xfer_free_buf(xfer);
2113 	STAILQ_INIT(&target->xferlist);
2114 	free(target->luns, M_SBP);
2115 	target->num_lun = 0;
2116 	target->luns = NULL;
2117 	target->fwdev = NULL;
/*
 * Newbus detach: unwind everything sbp_attach() created — detach all
 * CAM targets, deregister the SIM and paths, log out of the devices
 * (with a short pause for logout completion), free the targets, remove
 * the status-FIFO address binding and its xfers, and destroy the DMA
 * tag and mutex.
 */
2121 sbp_detach(device_t dev)
2123 	struct sbp_softc *sbp = ((struct sbp_softc *)device_get_softc(dev));
2124 	struct firewire_comm *fc = sbp->fd.fc;
2128 	printf("sbp_detach\n");
2132 	for (i = 0; i < SBP_NUM_TARGETS; i ++)
2133 		sbp_cam_detach_target(&sbp->targets[i]);
2135 	xpt_async(AC_LOST_DEVICE, sbp->path, NULL);
2136 	xpt_free_path(sbp->path);
2137 	xpt_bus_deregister(cam_sim_path(sbp->sim));
2138 	cam_sim_free(sbp->sim, /*free_devq*/ TRUE);
2140 	sbp_logout_all(sbp);
2143 	/* XXX wait for logout completion */
2144 	pause("sbpdtc", hz/2);
2147 	for (i = 0 ; i < SBP_NUM_TARGETS ; i ++)
2148 		sbp_free_target(&sbp->targets[i]);
2151 	fw_bindremove(fc, &sbp->fwb);
2152 	fw_xferlist_remove(&sbp->fwb.xferlist);
2154 	bus_dma_tag_destroy(sbp->dmat);
2155 	mtx_destroy(&sbp->mtx);
/*
 * Remove one LUN from CAM's view: abort its outstanding OCBs with
 * CAM_DEV_NOT_THERE, release any devq freeze it holds, and announce
 * the device lost before freeing its path.  Devices already DEAD or in
 * RESET are skipped.
 */
2161 sbp_cam_detach_sdev(struct sbp_dev *sdev)
2165 	if (sdev->status == SBP_DEV_DEAD)
2167 	if (sdev->status == SBP_DEV_RESET)
2169 	SBP_LOCK_ASSERT(sdev->target->sbp);
2170 	sbp_abort_all_ocbs(sdev, CAM_DEV_NOT_THERE);
2172 		xpt_release_devq(sdev->path,
2173 		    sdev->freeze, TRUE);
2175 		xpt_async(AC_LOST_DEVICE, sdev->path, NULL);
2176 		xpt_free_path(sdev->path);
/*
 * Detach every LUN of a target from CAM, cancelling any pending rescan
 * first.  Caller must hold the sbp softc lock.
 */
2182 sbp_cam_detach_target(struct sbp_target *target)
2186 	SBP_LOCK_ASSERT(target->sbp);
2187 	if (target->luns != NULL) {
2189 		printf("sbp_detach_target %d\n", target->target_id);
2191 		callout_stop(&target->scan_callout);
2192 		for (i = 0; i < target->num_lun; i++)
2193 			sbp_cam_detach_sdev(target->luns[i]);
/*
 * Recover a wedged target: freeze and abort every live LUN's OCBs with
 * CAM_CMD_TIMEOUT and push the LUNs back to the LOGIN state, then
 * either issue an SBP-2 TARGET RESET management ORB or restart via a
 * bus reset, selected by `method`.
 */
2198 sbp_target_reset(struct sbp_dev *sdev, int method)
2201 	struct sbp_target *target = sdev->target;
2202 	struct sbp_dev *tsdev;
2204 	SBP_LOCK_ASSERT(target->sbp);
2205 	for (i = 0; i < target->num_lun; i++) {
2206 		tsdev = target->luns[i];
2209 		if (tsdev->status == SBP_DEV_DEAD)
2211 		if (tsdev->status == SBP_DEV_RESET)
2213 		xpt_freeze_devq(tsdev->path, 1);
2215 		sbp_abort_all_ocbs(tsdev, CAM_CMD_TIMEOUT);
2217 		tsdev->status = SBP_DEV_LOGIN;
2221 		printf("target reset\n");
2222 		sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2225 		printf("reset start\n");
2226 		sbp_reset_start(sdev);
/*
 * Callout handler fired when a management ORB gets no status within its
 * 5-second window: drop the stale ORB, then either run the next queued
 * management request or restart the device from scratch via
 * sbp_reset_start().  Runs with the softc lock held (callout lock).
 */
2233 sbp_mgm_timeout(void *arg)
2235 	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2236 	struct sbp_dev *sdev = ocb->sdev;
2237 	struct sbp_target *target = sdev->target;
2239 	SBP_LOCK_ASSERT(target->sbp);
2240 	device_printf(sdev->target->sbp->fd.dev,
2241 	    "%s:%s request timeout(mgm orb:0x%08x)\n",
2242 	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2243 	target->mgm_ocb_cur = NULL;
2244 	sbp_free_ocb(sdev, ocb);
2247 	printf("run next request\n");
2248 	sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2250 	device_printf(sdev->target->sbp->fd.dev,
2251 	    "%s:%s reset start\n",
2252 	    __func__, sdev->bustgtlun);
2253 	sbp_reset_start(sdev);
/*
 * Callout handler for a command ORB that never completed.  Escalates by
 * sdev->timeout count: first an agent reset (aborting outstanding OCBs
 * with CAM_CMD_TIMEOUT), then target resets of increasing severity, and
 * ultimately detaching the target from CAM and discarding its LUNs.
 */
2257 sbp_timeout(void *arg)
2259 	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2260 	struct sbp_dev *sdev = ocb->sdev;
2262 	device_printf(sdev->target->sbp->fd.dev,
2263 	    "%s:%s request timeout(cmd orb:0x%08x) ... ",
2264 	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2266 	SBP_LOCK_ASSERT(sdev->target->sbp);
2268 	switch(sdev->timeout) {
2270 		printf("agent reset\n");
2271 		xpt_freeze_devq(sdev->path, 1);
2273 		sbp_abort_all_ocbs(sdev, CAM_CMD_TIMEOUT);
2274 		sbp_agent_reset(sdev);
2278 		sbp_target_reset(sdev, sdev->timeout - 1);
/* Last resort: give up on the target entirely. */
2283 		sbp_cam_detach_target(target);
2284 		if (target->luns != NULL)
2285 			free(target->luns, M_SBP);
2286 		target->num_lun = 0;
2287 		target->luns = NULL;
2288 		target->fwdev = NULL;
/*
 * CAM SIM action entry point: dispatch a CCB from the transport layer.
 * Maps target/LUN to an sdev, rejects CCBs for absent or non-attached
 * devices, and handles:
 *   XPT_SCSI_IO          - build a command ORB (direction, speed,
 *                          max-payload, CDB copy), DMA-map the data via
 *                          bus_dmamap_load_ccb(), and hand off to
 *                          sbp_execute_ocb();
 *   XPT_CALC_GEOMETRY    - delegate to cam_calc_geometry() (legacy
 *                          open-coded path kept for old __FreeBSD_version);
 *   XPT_RESET_BUS        - not supported (CAM_REQ_INVALID);
 *   XPT_PATH_INQ         - report SIM capabilities (tagged queuing, no
 *                          bus reset, 6-byte commands disabled);
 *   XPT_GET_TRAN_SETTINGS- report fixed SPI-shaped transport settings
 *                          (FireWire has no real CAM transport type);
 *   XPT_SET_TRAN_SETTINGS- rejected.
 * Runs with the SIM lock held.
 */
2294 sbp_action(struct cam_sim *sim, union ccb *ccb)
2297 	struct sbp_softc *sbp = (struct sbp_softc *)sim->softc;
2298 	struct sbp_target *target = NULL;
2299 	struct sbp_dev *sdev = NULL;
2302 	SBP_LOCK_ASSERT(sbp);
2303 	/* target:lun -> sdev mapping */
2305 	    && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD
2306 	    && ccb->ccb_h.target_id < SBP_NUM_TARGETS) {
2307 		target = &sbp->targets[ccb->ccb_h.target_id];
2308 		if (target->fwdev != NULL
2309 		    && ccb->ccb_h.target_lun != CAM_LUN_WILDCARD
2310 		    && ccb->ccb_h.target_lun < target->num_lun) {
2311 			sdev = target->luns[ccb->ccb_h.target_lun];
2312 			if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2313 			    sdev->status != SBP_DEV_PROBE)
2320 	printf("invalid target %d lun %d\n",
2321 	    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
/* These opcodes require a real device behind the path. */
2324 	switch (ccb->ccb_h.func_code) {
2327 	case XPT_GET_TRAN_SETTINGS:
2328 	case XPT_SET_TRAN_SETTINGS:
2329 	case XPT_CALC_GEOMETRY:
2332 			printf("%s:%d:%d:func_code 0x%04x: "
2333 			    "Invalid target (target needed)\n",
2334 			    device_get_nameunit(sbp->fd.dev),
2335 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2336 			    ccb->ccb_h.func_code);
2339 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2346 		/* The opcodes sometimes aimed at a target (sc is valid),
2347 		 * sometimes aimed at the SIM (sc is invalid and target is
2348 		 * CAM_TARGET_WILDCARD)
2351 		    ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
2353 			printf("%s:%d:%d func_code 0x%04x: "
2354 			    "Invalid target (no wildcard)\n",
2355 			    device_get_nameunit(sbp->fd.dev),
2356 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2357 			    ccb->ccb_h.func_code);
2359 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2365 		/* XXX Hm, we should check the input parameters */
2369 	switch (ccb->ccb_h.func_code) {
2372 		struct ccb_scsiio *csio;
2373 		struct sbp_ocb *ocb;
2378 		mtx_assert(sim->mtx, MA_OWNED);
2381 		printf("%s:%d:%d XPT_SCSI_IO: "
2382 		    "cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x"
2384 		    "%db cmd/%db data/%db sense\n",
2385 		    device_get_nameunit(sbp->fd.dev),
2386 		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2387 		    csio->cdb_io.cdb_bytes[0],
2388 		    csio->cdb_io.cdb_bytes[1],
2389 		    csio->cdb_io.cdb_bytes[2],
2390 		    csio->cdb_io.cdb_bytes[3],
2391 		    csio->cdb_io.cdb_bytes[4],
2392 		    csio->cdb_io.cdb_bytes[5],
2393 		    csio->cdb_io.cdb_bytes[6],
2394 		    csio->cdb_io.cdb_bytes[7],
2395 		    csio->cdb_io.cdb_bytes[8],
2396 		    csio->cdb_io.cdb_bytes[9],
2397 		    ccb->ccb_h.flags & CAM_DIR_MASK,
2398 		    csio->cdb_len, csio->dxfer_len,
2402 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2407 		/* if we are in probe stage, pass only probe commands */
2408 		if (sdev->status == SBP_DEV_PROBE) {
2410 			name = xpt_path_periph(ccb->ccb_h.path)->periph_name;
2411 			printf("probe stage, periph name: %s\n", name);
2412 			if (strcmp(name, "probe") != 0) {
2413 				ccb->ccb_h.status = CAM_REQUEUE_REQ;
/* OCB pool exhausted: freeze the devq until one is freed. */
2419 		if ((ocb = sbp_get_ocb(sdev)) == NULL) {
2420 			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2421 			if (sdev->freeze == 0) {
2422 				xpt_freeze_devq(sdev->path, 1);
2429 		ocb->flags = OCB_ACT_CMD;
2432 		ccb->ccb_h.ccb_sdev_ptr = sdev;
/* Assemble the command ORB (big-endian on the wire). */
2433 		ocb->orb[0] = htonl(1U << 31);
2435 		ocb->orb[2] = htonl(((sbp->fd.fc->nodeid | FWLOCALBUS )<< 16) );
2436 		ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2437 		speed = min(target->fwdev->speed, max_speed);
2438 		ocb->orb[4] = htonl(ORB_NOTIFY | ORB_CMD_SPD(speed)
2439 		    | ORB_CMD_MAXP(speed + 7));
2440 		if((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN){
2441 			ocb->orb[4] |= htonl(ORB_CMD_IN);
2444 		if (csio->ccb_h.flags & CAM_CDB_POINTER)
2445 			cdb = (void *)csio->cdb_io.cdb_ptr;
2447 			cdb = (void *)&csio->cdb_io.cdb_bytes;
2448 		bcopy(cdb, (void *)&ocb->orb[5], csio->cdb_len);
2450 		printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2451 		printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2453 		if (ccb->csio.dxfer_len > 0) {
2456 			error = bus_dmamap_load_ccb(/*dma tag*/sbp->dmat,
2457 			    /*dma map*/ocb->dmamap,
2463 				printf("sbp: bus_dmamap_load error %d\n", error);
2465 			sbp_execute_ocb(ocb, NULL, 0, 0);
2468 	case XPT_CALC_GEOMETRY:
2470 		struct ccb_calc_geometry *ccg;
2471 #if defined(__DragonFly__) || __FreeBSD_version < 501100
2473 		uint32_t secs_per_cylinder;
2478 		if (ccg->block_size == 0) {
2479 			printf("sbp_action: block_size is 0.\n");
2480 			ccb->ccb_h.status = CAM_REQ_INVALID;
2485 		printf("%s:%d:%d:%d:XPT_CALC_GEOMETRY: "
2486 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2487 		    "Volume size = %d\n",
2489 		    "Volume size = %jd\n",
2491 		    device_get_nameunit(sbp->fd.dev),
2492 		    cam_sim_path(sbp->sim),
2493 		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
2494 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
2500 #if defined(__DragonFly__) || __FreeBSD_version < 501100
/* Legacy open-coded CHS fabrication for pre-501100 kernels. */
2501 		size_mb = ccg->volume_size
2502 			/ ((1024L * 1024L) / ccg->block_size);
2504 		if (size_mb > 1024 && extended) {
2506 			ccg->secs_per_track = 63;
2509 			ccg->secs_per_track = 32;
2511 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2512 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2513 		ccb->ccb_h.status = CAM_REQ_CMP;
2515 		cam_calc_geometry(ccg, /*extended*/1);
2520 	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
2524 		printf("%s:%d:XPT_RESET_BUS: \n",
2525 		    device_get_nameunit(sbp->fd.dev), cam_sim_path(sbp->sim));
2528 		ccb->ccb_h.status = CAM_REQ_INVALID;
2532 	case XPT_PATH_INQ:		/* Path routing inquiry */
2534 		struct ccb_pathinq *cpi = &ccb->cpi;
2537 		printf("%s:%d:%d XPT_PATH_INQ:.\n",
2538 		    device_get_nameunit(sbp->fd.dev),
2539 		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2541 		cpi->version_num = 1; /* XXX??? */
2542 		cpi->hba_inquiry = PI_TAG_ABLE;
2543 		cpi->target_sprt = 0;
2544 		cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE;
2545 		cpi->hba_eng_cnt = 0;
2546 		cpi->max_target = SBP_NUM_TARGETS - 1;
2547 		cpi->max_lun = SBP_NUM_LUNS - 1;
2548 		cpi->initiator_id = SBP_INITIATOR;
2549 		cpi->bus_id = sim->bus_id;
2550 		cpi->base_transfer_speed = 400 * 1000 / 8;
2551 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2552 		strncpy(cpi->hba_vid, "SBP", HBA_IDLEN);
2553 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
2554 		cpi->unit_number = sim->unit_number;
2555 		cpi->transport = XPORT_SPI;	/* XX should have a FireWire */
2556 		cpi->transport_version = 2;
2557 		cpi->protocol = PROTO_SCSI;
2558 		cpi->protocol_version = SCSI_REV_2;
2560 		cpi->ccb_h.status = CAM_REQ_CMP;
2564 	case XPT_GET_TRAN_SETTINGS:
2566 		struct ccb_trans_settings *cts = &ccb->cts;
2567 		struct ccb_trans_settings_scsi *scsi =
2568 		    &cts->proto_specific.scsi;
2569 		struct ccb_trans_settings_spi *spi =
2570 		    &cts->xport_specific.spi;
2572 		cts->protocol = PROTO_SCSI;
2573 		cts->protocol_version = SCSI_REV_2;
2574 		cts->transport = XPORT_SPI;	/* should have a FireWire */
2575 		cts->transport_version = 2;
2576 		spi->valid = CTS_SPI_VALID_DISC;
2577 		spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2578 		scsi->valid = CTS_SCSI_VALID_TQ;
2579 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2581 		printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:.\n",
2582 		    device_get_nameunit(sbp->fd.dev),
2583 		    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2585 		cts->ccb_h.status = CAM_REQ_CMP;
2590 		ccb->ccb_h.status = CAM_UA_ABORT;
2593 	case XPT_SET_TRAN_SETTINGS:
2596 		ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * busdma load callback: finish building and dispatch a command ORB once
 * its data buffer segments are known.  A single segment becomes a
 * direct data pointer in orb[3]/orb[4]; multiple segments are written
 * into the ORB's indirect page table (ind_ptr) with ORB_CMD_PTBL set.
 * The ORB is then linked onto the device queue; depending on doorbell
 * mode and queue state, the target is notified via a doorbell ring or
 * by writing the ORB_POINTER register (sbp_orb_pointer).
 */
2604 sbp_execute_ocb(void *arg, bus_dma_segment_t *segments, int seg, int error)
2607 	struct sbp_ocb *ocb;
2608 	struct sbp_ocb *prev;
2609 	bus_dma_segment_t *s;
2612 	printf("sbp_execute_ocb: error=%d\n", error);
2614 	ocb = (struct sbp_ocb *)arg;
2617 	printf("sbp_execute_ocb: seg %d", seg);
2618 	for (i = 0; i < seg; i++)
2619 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2620 		printf(", %x:%d", segments[i].ds_addr, segments[i].ds_len);
2622 		printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
2623 					(uintmax_t)segments[i].ds_len);
2629 		/* direct pointer */
2631 		if (s->ds_len > SBP_SEG_MAX)
2632 			panic("ds_len > SBP_SEG_MAX, fix busdma code");
2633 		ocb->orb[3] = htonl(s->ds_addr);
2634 		ocb->orb[4] |= htonl(s->ds_len);
2635 	} else if(seg > 1) {
/* Scatter/gather: fill the SBP-2 unrestricted page table. */
2637 		for (i = 0; i < seg; i++) {
2640 			/* XXX LSI Logic "< 16 byte" bug might be hit */
2642 				printf("sbp_execute_ocb: warning, "
2643 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2644 				    "segment length(%d) is less than 16."
2646 				    "segment length(%zd) is less than 16."
2648 				    "(seg=%d/%d)\n", (size_t)s->ds_len, i+1, seg);
2650 			if (s->ds_len > SBP_SEG_MAX)
2651 				panic("ds_len > SBP_SEG_MAX, fix busdma code");
2652 			ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2653 			ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2655 		ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
/* Sync data map and the ORB DMA area before the target fetches them. */
2659 	bus_dmamap_sync(ocb->sdev->target->sbp->dmat, ocb->dmamap,
2660 	    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2661 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2662 	prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2663 	fwdma_sync(&ocb->sdev->dma, BUS_DMASYNC_PREWRITE);
2666 		if (ocb->sdev->last_ocb != NULL)
2667 			sbp_doorbell(ocb->sdev);
2669 			sbp_orb_pointer(ocb->sdev, ocb);
2672 		if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2673 			ocb->sdev->flags &= ~ORB_LINK_DEAD;
2674 			sbp_orb_pointer(ocb->sdev, ocb);
/* CAM poll entry: drive the firewire controller's interrupt handler
 * synchronously (used during kernel dumps / polled operation). */
2680 sbp_poll(struct cam_sim *sim)
2682 	struct sbp_softc *sbp;
2683 	struct firewire_comm *fc;
2685 	sbp = (struct sbp_softc *)sim->softc;
2688 	fc->poll(fc, 0, -1);
/*
 * Find and remove the OCB matching a received status block from the
 * device's outstanding queue.  On a match: stop its timeout, post-sync
 * and unload its data DMA map, and handle ORB-chain bookkeeping —
 * without doorbell mode, a SRC_NO_NEXT status means the fetch agent
 * stopped, so the next queued ORB pointer must be re-written (or the
 * link marked dead for out-of-order completion); in doorbell mode the
 * completed OCB is parked in sdev->last_ocb so the next ORB can be
 * chained behind it.  Returns the matched OCB, or presumably NULL when
 * nothing matches (tail elided in this view).  Lock held by caller.
 */
2693 static struct sbp_ocb *
2694 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2696 	struct sbp_ocb *ocb;
2697 	struct sbp_ocb *next;
2701 	device_printf(sdev->target->sbp->fd.dev,
2702 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2703 	    "%s:%s 0x%08lx src %d\n",
2705 	    "%s:%s 0x%08x src %d\n",
2707 	    __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo), sbp_status->src);
2709 	SBP_LOCK_ASSERT(sdev->target->sbp);
2710 	STAILQ_FOREACH_SAFE(ocb, &sdev->ocbs, ocb, next) {
2711 		if (OCB_MATCH(ocb, sbp_status)) {
2713 			STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2714 			if (ocb->ccb != NULL)
2715 				callout_stop(&ocb->timer);
/* Low 16 bits of orb[4] are the data length: nonzero means DMA mapped. */
2716 			if (ntohl(ocb->orb[4]) & 0xffff) {
2717 				bus_dmamap_sync(sdev->target->sbp->dmat,
2719 					(ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2720 					BUS_DMASYNC_POSTREAD :
2721 					BUS_DMASYNC_POSTWRITE);
2722 				bus_dmamap_unload(sdev->target->sbp->dmat,
2725 			if (!use_doorbell) {
2726 				if (sbp_status->src == SRC_NO_NEXT) {
2728 						sbp_orb_pointer(sdev, next);
2729 					else if (order > 0) {
2731 						 * Unordered execution
2732 						 * We need to send pointer for
2735 						sdev->flags |= ORB_LINK_DEAD;
2740 				 * XXX this is not correct for unordered
2743 				if (sdev->last_ocb != NULL) {
2744 					sbp_free_ocb(sdev, sdev->last_ocb);
2746 				sdev->last_ocb = ocb;
2748 				    sbp_status->src == SRC_NO_NEXT)
2756 	if (ocb && order > 0) {
2757 		device_printf(sdev->target->sbp->fd.dev,
2758 		    "%s:%s unordered execution order:%d\n",
2759 		    __func__, sdev->bustgtlun, order);
/*
 * Append an OCB to the device's outstanding queue and arm its CCB
 * timeout.  If a predecessor exists (the previous queue tail, or in
 * doorbell mode the parked last_ocb), link the new ORB behind it by
 * patching the predecessor's next-ORB pointer (orb[1]) and clearing its
 * null bit (orb[0]) — volatile writes force orb[1] to be stored before
 * orb[0].  Returns the previous tail (NULL if the queue was empty) so
 * the caller knows whether the fetch agent must be (re)started.
 */
2765 static struct sbp_ocb *
2766 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2768 	struct sbp_ocb *prev, *prev2;
2770 	SBP_LOCK_ASSERT(sdev->target->sbp);
2772 	device_printf(sdev->target->sbp->fd.dev,
2773 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2774 	    "%s:%s 0x%08x\n", __func__, sdev->bustgtlun, ocb->bus_addr);
2776 	    "%s:%s 0x%08jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2779 	prev2 = prev = STAILQ_LAST(&sdev->ocbs, sbp_ocb, ocb);
2780 	STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2782 	if (ocb->ccb != NULL) {
2783 		callout_reset_sbt(&ocb->timer,
2784 		    SBT_1MS * ocb->ccb->ccb_h.timeout, 0, sbp_timeout,
2788 	if (use_doorbell && prev == NULL)
2789 		prev2 = sdev->last_ocb;
2791 	if (prev2 != NULL && (ocb->sdev->flags & ORB_LINK_DEAD) == 0) {
2793 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2794 		printf("linking chain 0x%x -> 0x%x\n",
2795 		    prev2->bus_addr, ocb->bus_addr);
2797 		printf("linking chain 0x%jx -> 0x%jx\n",
2798 		    (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2802 		 * Suppress compiler optimization so that orb[1] must be written first.
2803 		 * XXX We may need an explicit memory barrier for other architectures
2804 		 * other than i386/amd64.
2806 		*(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2807 		*(volatile uint32_t *)&prev2->orb[0] = 0;
/*
 * Pop a free OCB from the device's pool.  On exhaustion, set
 * ORB_SHORTAGE (so sbp_free_ocb() will unfreeze the devq later) and
 * return NULL.  Caller must hold the sbp softc lock.
 */
2813 static struct sbp_ocb *
2814 sbp_get_ocb(struct sbp_dev *sdev)
2816 	struct sbp_ocb *ocb;
2818 	SBP_LOCK_ASSERT(sdev->target->sbp);
2819 	ocb = STAILQ_FIRST(&sdev->free_ocbs);
2821 		sdev->flags |= ORB_SHORTAGE;
2822 		printf("ocb shortage!!!\n");
2825 	STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
/*
 * Return an OCB to the device's free pool.  If the pool had run dry
 * (ORB_SHORTAGE), clear the flag and release the devq freeze taken when
 * allocation failed, letting CAM resubmit queued commands.
 */
2831 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2836 	SBP_LOCK_ASSERT(sdev->target->sbp);
2837 	STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2838 	if ((sdev->flags & ORB_SHORTAGE) != 0) {
2841 		sdev->flags &= ~ORB_SHORTAGE;
2842 		count = sdev->freeze;
2844 		xpt_release_devq(sdev->path, count, TRUE);
/*
 * Abort one outstanding OCB: post-sync and unload its data DMA map if
 * any data was mapped, stop its timeout, complete the attached CCB with
 * the given CAM status, and recycle the OCB.  Lock held by caller.
 */
2849 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2851 	struct sbp_dev *sdev;
2854 	SBP_LOCK_ASSERT(sdev->target->sbp);
2856 	device_printf(sdev->target->sbp->fd.dev,
2857 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2858 	    "%s:%s 0x%x\n", __func__, sdev->bustgtlun, ocb->bus_addr);
2860 	    "%s:%s 0x%jx\n", __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2864 	if (ocb->ccb != NULL)
2865 		sbp_print_scsi_cmd(ocb);
/* Nonzero data length in orb[4] means a DMA map is loaded. */
2867 	if (ntohl(ocb->orb[4]) & 0xffff) {
2868 		bus_dmamap_sync(sdev->target->sbp->dmat, ocb->dmamap,
2869 		    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2870 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2871 		bus_dmamap_unload(sdev->target->sbp->dmat, ocb->dmamap);
2873 	if (ocb->ccb != NULL) {
2874 		callout_stop(&ocb->timer);
2875 		ocb->ccb->ccb_h.status = status;
2878 	sbp_free_ocb(sdev, ocb);
/*
 * Abort every outstanding OCB of a device with the given CAM status.
 * The queue is moved to a local list first so sbp_abort_ocb() can run
 * without the queue changing underneath it; the parked last_ocb (if
 * any) is freed as well.  Lock held by caller.
 */
2882 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2884 	struct sbp_ocb *ocb, *next;
2885 	STAILQ_HEAD(, sbp_ocb) temp;
2888 	SBP_LOCK_ASSERT(sdev->target->sbp);
2889 	STAILQ_CONCAT(&temp, &sdev->ocbs);
2890 	STAILQ_INIT(&sdev->ocbs);
2892 	STAILQ_FOREACH_SAFE(ocb, &temp, ocb, next) {
2893 		sbp_abort_ocb(ocb, status);
2895 	if (sdev->last_ocb != NULL) {
2896 		sbp_free_ocb(sdev, sdev->last_ocb);
2897 		sdev->last_ocb = NULL;
/*
 * Newbus glue: device method table, driver declaration, and module
 * registration.  sbp attaches as a child of the firewire bus and
 * depends on the firewire and cam modules.
 */
2901 static devclass_t sbp_devclass;
2903 static device_method_t sbp_methods[] = {
2904 	/* device interface */
2905 	DEVMETHOD(device_identify,	sbp_identify),
2906 	DEVMETHOD(device_probe,		sbp_probe),
2907 	DEVMETHOD(device_attach,	sbp_attach),
2908 	DEVMETHOD(device_detach,	sbp_detach),
2909 	DEVMETHOD(device_shutdown,	sbp_shutdown),
2914 static driver_t sbp_driver = {
2917 	sizeof(struct sbp_softc),
2919 #ifdef __DragonFly__
2920 DECLARE_DUMMY_MODULE(sbp);
2922 DRIVER_MODULE(sbp, firewire, sbp_driver, sbp_devclass, 0, 0);
2923 MODULE_VERSION(sbp, 1);
2924 MODULE_DEPEND(sbp, firewire, 1, 1, 1);
2925 MODULE_DEPEND(sbp, cam, 1, 1, 1);