3 * Hidetoshi Shimokawa. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
16 * This product includes software developed by Hidetoshi Shimokawa.
18 * 4. Neither the name of the author nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/systm.h>
40 #include <sys/sysctl.h>
41 #include <sys/types.h>
43 #include <sys/malloc.h>
44 #include <sys/endian.h>
47 #include <machine/bus.h>
49 #include <dev/firewire/firewire.h>
50 #include <dev/firewire/firewirereg.h>
51 #include <dev/firewire/iec13213.h>
52 #include <dev/firewire/sbp.h>
53 #include <dev/firewire/fwmem.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #include <cam/cam_periph.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
/*
 * Compile-time constants for the SBP-2 target: receive length, initiator
 * limits, the CSR address layout of the management/command agents, and
 * per-login flag bits.  NOTE(review): listing is elided — some macro
 * definitions (e.g. MAX_LOGINS, MAX_LUN, FETCH_CMD) fall in the missing lines.
 */
64 #define SBP_TARG_RECV_LEN 8
65 #define MAX_INITIATORS 8
70 * management/command block agent registers
72 * BASE 0xffff f001 0000 management port
73 * BASE 0xffff f001 0020 command port for login id 0
74 * BASE 0xffff f001 0040 command port for login id 1
77 #define SBP_TARG_MGM 0x10000 /* offset from 0xffff f000 000 */
78 #define SBP_TARG_BIND_HI 0xffff
79 #define SBP_TARG_BIND_LO(l) (0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1))
80 #define SBP_TARG_BIND_START (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
82 #define SBP_TARG_BIND_END (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
83 SBP_TARG_BIND_LO(MAX_LOGINS))
/* Map a low CSR address back to the login id that owns that command port. */
84 #define SBP_TARG_LOGIN_ID(lo) (((lo) - SBP_TARG_BIND_LO(0))/0x20)
88 #define FETCH_POINTER 2
/* Flag bits shared by login->flags / lstate->flags / sc->flags. */
90 #define F_LINK_ACTIVE (1 << 0)
91 #define F_ATIO_STARVED (1 << 1)
92 #define F_LOGIN (1 << 2)
93 #define F_HOLD (1 << 3)
94 #define F_FREEZED (1 << 4)
96 static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");
/* debug.sbp_targ_debug sysctl gates the verbose printf()s throughout. */
100 SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0,
101 "SBP target mode debug flag");
/*
 * Per-initiator login state: the LUN state it logged into, the FireWire
 * device, the login response sent back, the queue of outstanding ORBs,
 * and a callout used to expire a held login after bus reset.
 * NOTE(review): listing is elided — fields such as id/flags/hold_sec/
 * fifo_hi/fifo_lo are referenced elsewhere but not visible here.
 */
103 struct sbp_targ_login {
104 struct sbp_targ_lstate *lstate;
105 struct fw_device *fwdev;
106 struct sbp_login_res loginres;
111 STAILQ_HEAD(, orb_info) orbs;
112 STAILQ_ENTRY(sbp_targ_login) link;
117 struct callout hold_callout;
/*
 * Per-LUN state: back-pointer to the softc, the CAM path for this LUN,
 * queued ATIO/INOT CCBs from the peripheral driver, the config-ROM model
 * text chunk, and the list of initiator logins on this LUN.
 */
120 struct sbp_targ_lstate {
122 struct sbp_targ_sc *sc;
123 struct cam_path *path;
124 struct ccb_hdr_slist accept_tios;
125 struct ccb_hdr_slist immed_notifies;
126 struct crom_chunk model;
128 STAILQ_HEAD(, sbp_targ_login) logins;
/*
 * Driver softc: the firewire bus attachment, CAM sim/path, the config-ROM
 * unit directory chunk, per-LUN lstate table, the wildcard "black hole"
 * lstate, and the table of active logins indexed by login id.
 * NOTE(review): the mtx used by SBP_LOCK is declared in an elided line.
 */
131 struct sbp_targ_softc {
132 struct firewire_dev_comm fd;
134 struct cam_path *path;
138 struct crom_chunk unit;
139 struct sbp_targ_lstate *lstate[MAX_LUN];
140 struct sbp_targ_lstate *black_hole;
141 struct sbp_targ_login *logins[MAX_LOGINS];
/* Softc lock; serializes ORB queues, login table, and lstate lists. */
144 #define SBP_LOCK(sc) mtx_lock(&(sc)->mtx)
145 #define SBP_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
/*
 * Wire-format ORB field layouts.  The bitfield order differs between
 * big- and little-endian hosts so the packed struct matches the SBP-2
 * wire layout either way.  NOTE(review): listing is elided — only
 * fragments of the corb4/orb4 bitfield definitions are visible.
 */
148 #if BYTE_ORDER == BIG_ENDIAN
155 page_table_present:1,
159 uint32_t data_size:16,
161 page_table_present:1,
172 #if BYTE_ORDER == BIG_ENDIAN
/*
 * Unrestricted page table format
 * states that the segment length
 * and high base addr are in the first
 * 32 bits and the base low is in
 */
195 struct unrestricted_page_table_fmt {
196 uint16_t segment_len;
197 uint16_t segment_base_high;
198 uint32_t segment_base_low;
/*
 * orb_info: in-flight ORB tracking — owning softc/device/login, the ATIO
 * it was delivered on, its fetch/CTIO/status state, the fetched page
 * table and cursor pointers into it, and the status block to return.
 * NOTE(review): several fields (orb_lo, data_hi/lo, refcount, state,
 * ccb, orb4) appear in elided lines.
 */
203 struct sbp_targ_softc *sc;
204 struct fw_device *fwdev;
205 struct sbp_targ_login *login;
207 struct ccb_accept_tio *atio;
/* Lifecycle states for an orb_info (orbi->state). */
209 #define ORBI_STATUS_NONE 0
210 #define ORBI_STATUS_FETCH 1
211 #define ORBI_STATUS_ATIO 2
212 #define ORBI_STATUS_CTIO 3
213 #define ORBI_STATUS_STATUS 4
214 #define ORBI_STATUS_POINTER 5
215 #define ORBI_STATUS_ABORTED 7
222 STAILQ_ENTRY(orb_info) link;
224 struct unrestricted_page_table_fmt *page_table;
225 struct unrestricted_page_table_fmt *cur_pte;
226 struct unrestricted_page_table_fmt *last_pte;
227 uint32_t last_block_read;
228 struct sbp_status status;
/* Names for management ORB functions (table contents elided here). */
231 static char *orb_fun_name[] = {
/* Forward declarations for the xfer-callback / fetch helpers below. */
235 static void sbp_targ_recv(struct fw_xfer *);
236 static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
237 uint16_t, uint32_t, struct sbp_targ_login *, int);
238 static void sbp_targ_xfer_pt(struct orb_info *);
239 static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);
/* Bus identify hook: add one sbp_targ child under the firewire parent. */
242 sbp_targ_identify(driver_t *driver, device_t parent)
244 BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent));
/*
 * Probe: only match when our unit number equals the parent's
 * (one target-mode instance per firewire bus), then set the description.
 */
248 sbp_targ_probe(device_t dev)
252 pa = device_get_parent(dev);
253 if (device_get_unit(dev) != device_get_unit(pa)) {
257 device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
/*
 * Tear down a login: free every queued orb_info, stop the hold callout,
 * unlink the login from its lstate and clear its slot in sc->logins,
 * then free the login itself.
 */
262 sbp_targ_dealloc_login(struct sbp_targ_login *login)
264 struct orb_info *orbi, *next;
267 printf("%s: login = NULL\n", __func__);
/* Walk the ORB queue with a saved next pointer since we free as we go. */
270 for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
271 next = STAILQ_NEXT(orbi, link);
273 printf("%s: free orbi %p\n", __func__, orbi);
274 free(orbi, M_SBP_TARG);
277 callout_stop(&login->hold_callout);
279 STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
280 login->lstate->sc->logins[login->id] = NULL;
282 printf("%s: free login %p\n", __func__, login);
283 free((void *)login, M_SBP_TARG);
/*
 * Callout handler: when the reconnection hold timer fires and the login
 * is still in HOLD state, the initiator failed to reconnect in time, so
 * drop the login entirely.
 */
288 sbp_targ_hold_expire(void *arg)
290 struct sbp_targ_login *login;
292 login = (struct sbp_targ_login *)arg;
294 if (login->flags & F_HOLD) {
295 printf("%s: login_id=%d expired\n", __func__, login->id);
296 sbp_targ_dealloc_login(login);
298 printf("%s: login_id=%d not hold\n", __func__, login->id);
/*
 * Bus-reset hook: freeze the CAM simq, rebuild our SBP-2 unit directory
 * in the config ROM (one LUN entry per enabled lstate), abort all
 * outstanding ORBs, and put every logged-in initiator into reconnection
 * hold, expiring via sbp_targ_hold_expire after hold_sec seconds.
 */
303 sbp_targ_post_busreset(void *arg)
305 struct sbp_targ_softc *sc;
306 struct crom_src *src;
307 struct crom_chunk *root;
308 struct crom_chunk *unit;
309 struct sbp_targ_lstate *lstate;
310 struct sbp_targ_login *login;
313 sc = (struct sbp_targ_softc *)arg;
314 src = sc->fd.fc->crom_src;
315 root = sc->fd.fc->crom_root;
/* Freeze the simq only once per reset cycle. */
319 if ((sc->flags & F_FREEZED) == 0) {
321 sc->flags |= F_FREEZED;
322 xpt_freeze_simq(sc->sim, /*count*/1);
325 printf("%s: already freezed\n", __func__);
/* Rebuild the SBP-2 unit directory from scratch. */
328 bzero(unit, sizeof(struct crom_chunk));
330 crom_add_chunk(src, root, unit, CROM_UDIR);
331 crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10);
332 crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2);
333 crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10);
334 crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI);
336 crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
337 crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);
339 for (i = 0; i < MAX_LUN; i++) {
340 lstate = sc->lstate[i];
343 crom_add_entry(unit, CSRKEY_FIRM_VER, 1);
344 crom_add_entry(unit, CROM_LUN, i);
345 crom_add_entry(unit, CSRKEY_MODEL, 1);
346 crom_add_simple_text(src, unit, &lstate->model, "TargetMode");
349 /* Process for reconnection hold time */
350 for (i = 0; i < MAX_LOGINS; i++) {
351 login = sc->logins[i];
354 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
355 if (login->flags & F_LOGIN) {
356 login->flags |= F_HOLD;
357 callout_reset(&login->hold_callout,
358 hz * login->hold_sec,
359 sbp_targ_hold_expire, (void *)login);
/* Post-explore hook: bus topology settled — unfreeze the CAM simq. */
365 sbp_targ_post_explore(void *arg)
367 struct sbp_targ_softc *sc;
369 sc = (struct sbp_targ_softc *)arg;
371 sc->flags &= ~F_FREEZED;
372 xpt_release_simq(sc->sim, /*run queue*/TRUE);
/*
 * Resolve the lstate a CCB addresses.  Wildcard target+LUN maps to the
 * "black hole" lstate; otherwise index sc->lstate by LUN.  With
 * notfound_failure set, a missing lstate is a CAM_PATH_INVALID error.
 */
378 sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
379 struct sbp_targ_lstate **lstate, int notfound_failure)
383 /* XXX 0 is the only valid target_id */
384 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
385 ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
386 *lstate = sc->black_hole;
388 printf("setting black hole for this target id(%d)\n", ccb->ccb_h.target_id);
389 return (CAM_REQ_CMP);
392 lun = ccb->ccb_h.target_lun;
394 return (CAM_LUN_INVALID);
396 *lstate = sc->lstate[lun];
398 if (notfound_failure != 0 && *lstate == NULL) {
400 printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
401 __func__, ccb->ccb_h.target_id, lun);
402 return (CAM_PATH_INVALID);
405 printf("%s: setting lstate for tgt(%d) lun(%d)\n",
406 __func__,ccb->ccb_h.target_id, lun);
408 return (CAM_REQ_CMP);
/*
 * Handle XPT_EN_LUN: enable or disable target mode on a LUN.
 * Enable path: allocate and initialize an lstate (or the wildcard black
 * hole), create its CAM path, and trigger a bus reset so initiators
 * re-read the config ROM.  Disable path: refuse if ATIOs/INOTs are still
 * queued, otherwise free the path, drop all logins, and free the lstate.
 */
412 sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
414 struct ccb_en_lun *cel = &ccb->cel;
415 struct sbp_targ_lstate *lstate;
418 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
419 if (status != CAM_REQ_CMP) {
420 ccb->ccb_h.status = status;
424 if (cel->enable != 0) {
/* Already-enabled LUN is an error, not a no-op. */
425 if (lstate != NULL) {
426 xpt_print_path(ccb->ccb_h.path);
427 printf("Lun already enabled\n");
428 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
431 if (cel->grp6_len != 0 || cel->grp7_len != 0) {
432 ccb->ccb_h.status = CAM_REQ_INVALID;
433 printf("Non-zero Group Codes\n");
436 lstate = (struct sbp_targ_lstate *)
437 malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO);
438 if (lstate == NULL) {
439 xpt_print_path(ccb->ccb_h.path);
440 printf("Couldn't allocate lstate\n");
441 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
445 printf("%s: malloc'd lstate %p\n",__func__, lstate);
/* Wildcard target id installs the catch-all black hole lstate. */
447 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
448 sc->black_hole = lstate;
450 printf("Blackhole set due to target id == %d\n",
451 ccb->ccb_h.target_id);
453 sc->lstate[ccb->ccb_h.target_lun] = lstate;
/* NOTE(review): redundant — lstate was already M_ZERO'd above. */
455 memset(lstate, 0, sizeof(*lstate));
457 status = xpt_create_path(&lstate->path, /*periph*/NULL,
458 xpt_path_path_id(ccb->ccb_h.path),
459 xpt_path_target_id(ccb->ccb_h.path),
460 xpt_path_lun_id(ccb->ccb_h.path));
461 if (status != CAM_REQ_CMP) {
462 free(lstate, M_SBP_TARG);
464 xpt_print_path(ccb->ccb_h.path);
465 printf("Couldn't allocate path\n");
466 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
469 SLIST_INIT(&lstate->accept_tios);
470 SLIST_INIT(&lstate->immed_notifies);
471 STAILQ_INIT(&lstate->logins);
473 ccb->ccb_h.status = CAM_REQ_CMP;
474 xpt_print_path(ccb->ccb_h.path);
475 printf("Lun now enabled for target mode\n");
/* Initiate a bus reset so initiators see the new config ROM. */
477 sc->fd.fc->ibr(sc->fd.fc);
479 struct sbp_targ_login *login, *next;
481 if (lstate == NULL) {
482 ccb->ccb_h.status = CAM_LUN_INVALID;
483 printf("Invalid lstate for this target\n");
486 ccb->ccb_h.status = CAM_REQ_CMP;
/* Cannot disable while the peripheral still has CCBs queued here. */
488 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
489 printf("ATIOs pending\n");
490 ccb->ccb_h.status = CAM_REQ_INVALID;
493 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
494 printf("INOTs pending\n");
495 ccb->ccb_h.status = CAM_REQ_INVALID;
498 if (ccb->ccb_h.status != CAM_REQ_CMP) {
499 printf("status != CAM_REQ_CMP\n");
503 xpt_print_path(ccb->ccb_h.path);
504 printf("Target mode disabled\n");
505 xpt_free_path(lstate->path);
/* Drop every login on this LUN; dealloc frees each login. */
507 for (login = STAILQ_FIRST(&lstate->logins); login != NULL;
509 next = STAILQ_NEXT(login, link);
510 sbp_targ_dealloc_login(login);
513 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
514 sc->black_hole = NULL;
516 sc->lstate[ccb->ccb_h.target_lun] = NULL;
518 printf("%s: free lstate %p\n", __func__, lstate);
519 free(lstate, M_SBP_TARG);
523 sc->fd.fc->ibr(sc->fd.fc);
/* Stub: deliver queued lstate events (INOTs) — not implemented yet. */
528 sbp_targ_send_lstate_events(struct sbp_targ_softc *sc,
529 struct sbp_targ_lstate *lstate)
532 struct ccb_hdr *ccbh;
533 struct ccb_immediate_notify *inot;
535 printf("%s: not implemented yet\n", __func__);
/* Unlink orbi from the login's ORB queue; caller holds the softc lock. */
541 sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login, struct orb_info *orbi)
543 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
/* Locking wrapper: take the softc lock around the queue removal. */
547 sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi)
550 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
551 SBP_UNLOCK(orbi->sc);
555 * tag_id/init_id encoding
557 * tag_id and init_id have only 32 bits each.
558 * scsi_target can handle very limited number(up to 15) of init_id.
559 * we have to encode 48bit orb and 64bit EUI64 into these
562 * tag_id represents lower 32bit of ORB address.
563 * init_id represents login_id.
/*
 * Look up an in-flight ORB by the CTIO's (tag_id, init_id) pair:
 * init_id selects the login, tag_id matches the ORB's low address.
 * Returns NULL (via elided paths) when the login or ORB is not found.
 */
567 static struct orb_info *
568 sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
569 u_int tag_id, u_int init_id)
571 struct sbp_targ_login *login;
572 struct orb_info *orbi;
574 login = lstate->sc->logins[init_id];
576 printf("%s: no such login\n", __func__);
579 STAILQ_FOREACH(orbi, &login->orbs, link)
580 if (orbi->orb_lo == tag_id)
582 printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
583 __func__, tag_id, init_id);
/*
 * Abort orbi and everything after it on its ORB queue.  CCB-bearing ORBs
 * get CAM_REQ_ABORTED; ORBs not yet past the ATIO stage are unlinked and
 * freed immediately, later-stage ones are just marked ABORTED so their
 * in-flight xfer callbacks clean up.
 */
590 sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
592 struct orb_info *norbi;
595 for (; orbi != NULL; orbi = norbi) {
596 printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb);
597 norbi = STAILQ_NEXT(orbi, link);
598 if (orbi->state != ORBI_STATUS_ABORTED) {
599 if (orbi->ccb != NULL) {
600 orbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
/* No xfer in flight yet — safe to free now. */
604 if (orbi->state <= ORBI_STATUS_ATIO) {
605 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
607 printf("%s: free orbi %p\n", __func__, orbi);
608 free(orbi, M_SBP_TARG);
611 orbi->state = ORBI_STATUS_ABORTED;
/*
 * fw_xfer completion callback that just releases the orb_info (and its
 * page table, if one was fetched).  Used when no further processing of
 * the ORB is needed.
 */
618 sbp_targ_free_orbi(struct fw_xfer *xfer)
620 struct orb_info *orbi;
622 if (xfer->resp != 0) {
624 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
626 orbi = (struct orb_info *)xfer->sc;
627 if ( orbi->page_table != NULL ) {
629 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
630 free(orbi->page_table, M_SBP_TARG);
631 orbi->page_table = NULL;
634 printf("%s: free orbi %p\n", __func__, orbi);
635 free(orbi, M_SBP_TARG);
/*
 * Write the ORB's status block to the initiator's status FIFO address,
 * optionally dequeuing the orbi from its login first.  The write is an
 * async fwmem block write; its completion callback (elided) frees orbi.
 */
641 sbp_targ_status_FIFO(struct orb_info *orbi,
642 uint32_t fifo_hi, uint32_t fifo_lo, int dequeue)
644 struct fw_xfer *xfer;
647 sbp_targ_remove_orb_info(orbi->login, orbi);
/* status.len is in quadlets; +1 covers the status header quadlet. */
649 xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
650 /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
651 sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
656 printf("%s: xfer == NULL\n", __func__);
/*
 * Generate the appropriate CAM status for the
 * completed CCB and translate it into an SBP-2 status block: for
 * CHECK CONDITION and friends, convert the CCB's SCSI sense data into
 * the SBP-2 sense format field by field, then push the status block to
 * the initiator's status FIFO.
 */
665 sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
667 struct sbp_status *sbp_status;
669 struct orb_info *norbi;
672 sbp_status = &orbi->status;
674 orbi->state = ORBI_STATUS_STATUS;
676 sbp_status->resp = 0; /* XXX */
677 sbp_status->status = 0; /* XXX */
678 sbp_status->dead = 0; /* XXX */
680 ccb->ccb_h.status= CAM_REQ_CMP;
682 switch (ccb->csio.scsi_status) {
685 printf("%s: STATUS_OK\n", __func__);
688 case SCSI_STATUS_CHECK_COND:
690 printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
691 goto process_scsi_status;
692 case SCSI_STATUS_BUSY:
694 printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
695 goto process_scsi_status;
696 case SCSI_STATUS_CMD_TERMINATED:
699 struct sbp_cmd_status *sbp_cmd_status;
700 struct scsi_sense_data *sense;
701 int error_code, sense_key, asc, ascq;
708 sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
709 sbp_cmd_status->status = ccb->csio.scsi_status;
710 sense = &ccb->csio.sense_data;
712 #if 0 /* XXX What we should do? */
714 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
716 norbi = STAILQ_NEXT(orbi, link);
718 printf("%s: status=%d\n", __func__, norbi->state);
719 if (norbi->ccb != NULL) {
720 norbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
721 xpt_done(norbi->ccb);
724 sbp_targ_remove_orb_info_locked(orbi->login, norbi);
/*
 * NOTE(review): norbi is advanced to the next element BEFORE the
 * free() below, so this frees the successor rather than the node
 * just removed — looks like a use-after-advance bug (and free(NULL)
 * at list end).  Verify against the unelided loop before changing.
 */
725 norbi = STAILQ_NEXT(norbi, link);
726 free(norbi, M_SBP_TARG);
731 sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
732 scsi_extract_sense_len(sense, sense_len, &error_code,
733 &sense_key, &asc, &ascq, /*show_errors*/ 0);
/* SBP-2 sense format: current vs deferred error encoding. */
735 switch (error_code) {
736 case SSD_CURRENT_ERROR:
737 case SSD_DESC_CURRENT_ERROR:
738 sbp_cmd_status->sfmt = SBP_SFMT_CURR;
741 sbp_cmd_status->sfmt = SBP_SFMT_DEFER;
745 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info,
748 sbp_cmd_status->valid = 1;
751 sbp_cmd_status->info = htobe32(info_trunc);
753 sbp_cmd_status->valid = 0;
756 sbp_cmd_status->s_key = sense_key;
/* Stream bits (filemark/EOM/ILI) from the sense data, if present. */
758 if (scsi_get_stream_info(sense, sense_len, NULL,
759 &stream_bits) == 0) {
760 sbp_cmd_status->mark =
761 (stream_bits & SSD_FILEMARK) ? 1 : 0;
762 sbp_cmd_status->eom =
763 (stream_bits & SSD_EOM) ? 1 : 0;
764 sbp_cmd_status->ill_len =
765 (stream_bits & SSD_ILI) ? 1 : 0;
767 sbp_cmd_status->mark = 0;
768 sbp_cmd_status->eom = 0;
769 sbp_cmd_status->ill_len = 0;
773 /* add_sense_code(_qual), info, cmd_spec_info */
776 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND,
777 &info, &sinfo) == 0) {
778 uint32_t cmdspec_trunc;
780 cmdspec_trunc = info;
782 sbp_cmd_status->cdb = htobe32(cmdspec_trunc);
785 sbp_cmd_status->s_code = asc;
786 sbp_cmd_status->s_qlfr = ascq;
788 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info,
790 sbp_cmd_status->fru = (uint8_t)info;
793 sbp_cmd_status->fru = 0;
796 if (scsi_get_sks(sense, sense_len, sks) == 0) {
797 bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
799 ccb->ccb_h.status |= CAM_SENT_SENSE;
805 printf("%s: unknown scsi status 0x%x\n", __func__,
/* Push the assembled status block to the initiator and dequeue. */
810 sbp_targ_status_FIFO(orbi,
811 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
/*
 * Invoked as a callback handler from fwmem_read/write_block
 *
 * Process read/write of initiator address space
 * completion and pass status onto the backend target.
 * If this is a partial read/write for a CCB then
 * we decrement the orbi's refcount to indicate
 * the status of the read/write is complete
 */
824 sbp_targ_cam_done(struct fw_xfer *xfer)
826 struct orb_info *orbi;
829 orbi = (struct orb_info *)xfer->sc;
832 printf("%s: resp=%d refcount=%d\n", __func__,
833 xfer->resp, orbi->refcount);
/* Transport failure: mark the status block dead and abort successors. */
835 if (xfer->resp != 0) {
836 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
837 orbi->status.resp = SBP_TRANS_FAIL;
838 orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/;
839 orbi->status.dead = 1;
840 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
/* Last outstanding fragment for this ORB — finish it. */
846 if (orbi->refcount == 0) {
848 if (orbi->state == ORBI_STATUS_ABORTED) {
850 printf("%s: orbi aborted\n", __func__);
851 sbp_targ_remove_orb_info(orbi->login, orbi);
852 if (orbi->page_table != NULL) {
854 printf("%s: free orbi->page_table %p\n",
855 __func__, orbi->page_table);
856 free(orbi->page_table, M_SBP_TARG);
859 printf("%s: free orbi %p\n", __func__, orbi);
860 free(orbi, M_SBP_TARG);
/* NOTE(review): compares status.resp against ORBI_STATUS_NONE (an
 * orbi->state constant, value 0) — works because both "no error"
 * values are 0, but the symbols are from different namespaces. */
862 } else if (orbi->status.resp == ORBI_STATUS_NONE) {
863 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
865 printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
866 sbp_targ_send_status(orbi, ccb);
869 printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
870 ccb->ccb_h.status = CAM_REQ_CMP;
874 SBP_UNLOCK(orbi->sc);
876 orbi->status.len = 1;
877 sbp_targ_status_FIFO(orbi,
878 orbi->login->fifo_hi, orbi->login->fifo_lo,
880 ccb->ccb_h.status = CAM_REQ_ABORTED;
883 SBP_UNLOCK(orbi->sc);
/*
 * Handle XPT_ABORT for a queued ATIO or INOT: find the CCB on the
 * lstate's pending list, unlink it, and complete it as aborted.
 * Returns CAM_REQ_CMP on success, CAM_UA_ABORT for unabortable CCB
 * types, CAM_PATH_INVALID when the CCB is not on the list.
 */
891 sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
894 struct sbp_targ_lstate *lstate;
895 struct ccb_hdr_slist *list;
896 struct ccb_hdr *curelm;
900 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
901 if (status != CAM_REQ_CMP)
904 accb = ccb->cab.abort_ccb;
906 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
907 list = &lstate->accept_tios;
908 else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
909 list = &lstate->immed_notifies;
911 return (CAM_UA_ABORT);
913 curelm = SLIST_FIRST(list);
/* Head removal is special-cased; otherwise walk for the predecessor. */
915 if (curelm == &accb->ccb_h) {
917 SLIST_REMOVE_HEAD(list, sim_links.sle);
919 while (curelm != NULL) {
920 struct ccb_hdr *nextelm;
922 nextelm = SLIST_NEXT(curelm, sim_links.sle);
923 if (nextelm == &accb->ccb_h) {
925 SLIST_NEXT(curelm, sim_links.sle) =
926 SLIST_NEXT(nextelm, sim_links.sle);
933 accb->ccb_h.status = CAM_REQ_ABORTED;
935 return (CAM_REQ_CMP);
937 printf("%s: not found\n", __func__);
938 return (CAM_PATH_INVALID);
/*
 * directly execute a read or write to the initiator
 * address space and set hand(sbp_targ_cam_done) to
 * process the completion from the SIM to the target.
 * set orbi->refcount to indicate that a read/write
 * is inflight to/from the initiator.
 * The transfer is chunked into 2048-byte fwmem block operations;
 * direction is taken from the CCB (CAM_DIR_OUT = read from initiator,
 * CAM_DIR_IN = write to initiator).
 */
949 sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
950 uint16_t dst_hi, uint32_t dst_lo, u_int size,
951 void (*hand)(struct fw_xfer *))
953 struct fw_xfer *xfer;
954 u_int len, ccb_dir, off = 0;
958 printf("%s: offset=%d size=%d\n", __func__, offset, size);
959 ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK;
960 ptr = (char *)orbi->ccb->csio.data_ptr + offset;
963 /* XXX assume dst_lo + off doesn't overflow */
964 len = MIN(size, 2048 /* XXX */);
967 if (ccb_dir == CAM_DIR_OUT) {
969 printf("%s: CAM_DIR_OUT --> read block in?\n",__func__);
970 xfer = fwmem_read_block(orbi->fwdev,
971 (void *)orbi, /*spd*/FWSPD_S400,
972 dst_hi, dst_lo + off, len,
976 printf("%s: CAM_DIR_IN --> write block out?\n",__func__);
977 xfer = fwmem_write_block(orbi->fwdev,
978 (void *)orbi, /*spd*/FWSPD_S400,
979 dst_hi, dst_lo + off, len,
983 printf("%s: xfer == NULL", __func__);
984 /* XXX what should we do?? */
/*
 * Completion callback for the page-table fetch issued by
 * sbp_targ_fetch_pt: handle abort/transport-failure cleanup, byte-swap
 * every page table entry into host order, then kick off the actual data
 * transfer via sbp_targ_xfer_pt.
 */
992 sbp_targ_pt_done(struct fw_xfer *xfer)
994 struct orb_info *orbi;
995 struct unrestricted_page_table_fmt *pt;
998 orbi = (struct orb_info *)xfer->sc;
1000 if (orbi->state == ORBI_STATUS_ABORTED) {
1002 printf("%s: orbi aborted\n", __func__);
1003 sbp_targ_remove_orb_info(orbi->login, orbi);
1005 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
1006 printf("%s: free orbi %p\n", __func__, orbi);
1008 free(orbi->page_table, M_SBP_TARG);
1009 free(orbi, M_SBP_TARG);
1014 if (xfer->resp != 0) {
1015 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1016 orbi->status.resp = SBP_TRANS_FAIL;
1017 orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/;
1018 orbi->status.dead = 1;
1019 orbi->status.len = 1;
1020 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1023 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
1025 sbp_targ_status_FIFO(orbi,
1026 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1027 free(orbi->page_table, M_SBP_TARG);
1028 orbi->page_table = NULL;
/*
 * Set endianness here so we don't have
 * to deal with it later
 */
1037 for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
1038 pt->segment_len = ntohs(pt->segment_len);
1040 printf("%s:segment_len = %u\n", __func__,pt->segment_len);
1041 pt->segment_base_high = ntohs(pt->segment_base_high);
1042 pt->segment_base_low = ntohl(pt->segment_base_low);
1045 sbp_targ_xfer_pt(orbi);
1048 if (orbi->refcount == 0)
1049 printf("%s: refcount == 0\n", __func__);
/*
 * Walk the (host-order) unrestricted page table and issue one
 * sbp_targ_xfer_buf per segment until the CTIO's dxfer_len is consumed.
 * Partially-consumed segments are adjusted in place (len/base advanced)
 * so a later CTIO can resume from cur_pte.
 */
1055 static void sbp_targ_xfer_pt(struct orb_info *orbi)
1058 uint32_t res, offset, len;
1062 printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
1063 res = ccb->csio.dxfer_len;
/*
 * If the page table required multiple CTIO's to
 * complete, then cur_pte is non NULL
 * and we need to start from the last position
 * If this is the first pass over a page table
 * then we just start at the beginning of the page
 *
 * Parse the unrestricted page table and figure out where we need
 * to shove the data from this read request.
 */
1075 for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
1076 len = MIN(orbi->cur_pte->segment_len, res);
1079 printf("%s:page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
1080 __func__, orbi->cur_pte->segment_base_high,
1081 orbi->cur_pte->segment_base_low,
1082 orbi->cur_pte->segment_len,
1084 sbp_targ_xfer_buf(orbi, offset,
1085 orbi->cur_pte->segment_base_high,
1086 orbi->cur_pte->segment_base_low,
1087 len, sbp_targ_cam_done);
/*
 * If we have only written partially to
 * this page table, then we need to save
 * our position for the next CTIO. If we
 * have completed the page table, then we
 * are safe to move on to the next entry.
 */
1095 if (len == orbi->cur_pte->segment_len) {
1098 uint32_t saved_base_low;
1100 /* Handle transfers that cross a 4GB boundary. */
1101 saved_base_low = orbi->cur_pte->segment_base_low;
1102 orbi->cur_pte->segment_base_low += len;
1103 if (orbi->cur_pte->segment_base_low < saved_base_low)
1104 orbi->cur_pte->segment_base_high++;
1106 orbi->cur_pte->segment_len -= len;
1110 printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
1111 __func__, orbi->cur_pte->segment_base_low,
1112 orbi->cur_pte, orbi->last_block_read);
1115 printf("Warning - short pt encountered. "
1116 "Could not transfer all data.\n");
/*
 * Create page table in local memory
 * and transfer it from the initiator
 * in order to know where we are supposed
 * to read data from / write data to.  On the first CTIO, allocate the
 * table and start an async fwmem read (completion: sbp_targ_pt_done);
 * on later CTIOs the table already exists and we go straight to
 * sbp_targ_xfer_pt.  Allocation/xfer failure completes the CCB with
 * CAM_RESRC_UNAVAIL.
 */
1128 sbp_targ_fetch_pt(struct orb_info *orbi)
1130 struct fw_xfer *xfer;
/*
 * Pull in page table from initiator
 * and setup for data from our
 */
1137 if (orbi->page_table == NULL) {
1138 orbi->page_table = malloc(orbi->orb4.data_size*
1139 sizeof(struct unrestricted_page_table_fmt),
1140 M_SBP_TARG, M_NOWAIT|M_ZERO);
1141 if (orbi->page_table == NULL)
1143 orbi->cur_pte = orbi->page_table;
1144 orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
1145 orbi->last_block_read = orbi->orb4.data_size;
1146 if (debug && orbi->page_table != NULL)
1147 printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
1148 __func__, orbi->page_table, orbi->orb4.data_size);
/* data_hi/data_lo point at the initiator-side page table. */
1150 xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
1151 orbi->data_hi, orbi->data_lo, orbi->orb4.data_size*
1152 sizeof(struct unrestricted_page_table_fmt),
1153 (void *)orbi->page_table, sbp_targ_pt_done);
/*
 * This is a CTIO for a page table we have
 * already malloc'd, so just directly invoke
 * the xfer function on the orbi.
 */
1163 sbp_targ_xfer_pt(orbi);
1167 orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1169 printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table);
1170 if (orbi->page_table != NULL) {
1171 free(orbi->page_table, M_SBP_TARG);
1172 orbi->page_table = NULL;
1174 xpt_done(orbi->ccb);
/*
 * Main CAM action dispatcher for the target-mode SIM.  Handles CTIOs
 * (data movement back to the initiator via page table or flat buffer),
 * ATIO/INOT resource registration, LUN enable, path inquiry, aborts,
 * and transport settings.  Called with the softc lock held (locking
 * handled by the sbp_targ_action wrapper).
 */
1179 sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
1181 struct sbp_targ_softc *sc;
1182 struct sbp_targ_lstate *lstate;
1186 sc = (struct sbp_targ_softc *)cam_sim_softc(sim);
1188 status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE);
1190 switch (ccb->ccb_h.func_code) {
1191 case XPT_CONT_TARGET_IO:
1193 struct orb_info *orbi;
1196 printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n",
1197 __func__, ccb->csio.tag_id);
1199 if (status != CAM_REQ_CMP) {
1200 ccb->ccb_h.status = status;
1204 /* XXX transfer from/to initiator */
1205 orbi = sbp_targ_get_orb_info(lstate,
1206 ccb->csio.tag_id, ccb->csio.init_id);
1208 ccb->ccb_h.status = CAM_REQ_ABORTED; /* XXX */
1212 if (orbi->state == ORBI_STATUS_ABORTED) {
1214 printf("%s: ctio aborted\n", __func__);
1215 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
1217 printf("%s: free orbi %p\n", __func__, orbi);
1218 free(orbi, M_SBP_TARG);
1219 ccb->ccb_h.status = CAM_REQ_ABORTED;
1223 orbi->state = ORBI_STATUS_CTIO;
1226 ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
/* Zero-length transfer is treated as no data phase. */
1229 if (ccb->csio.dxfer_len == 0)
1230 ccb_dir = CAM_DIR_NONE;
1233 if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0)
1234 printf("%s: direction mismatch\n", __func__);
1236 /* check page table */
1237 if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) {
1239 printf("%s: page_table_present\n",
1241 if (orbi->orb4.page_size != 0) {
1242 printf("%s: unsupported pagesize %d != 0\n",
1243 __func__, orbi->orb4.page_size);
1244 ccb->ccb_h.status = CAM_REQ_INVALID;
1248 sbp_targ_fetch_pt(orbi);
/* Flat (non-page-table) transfer path. */
1253 if (ccb_dir != CAM_DIR_NONE) {
1254 sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
1256 MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
/* Advance for a follow-up CTIO when the ORB wants more data. */
1258 if ( orbi->orb4.data_size > ccb->csio.dxfer_len ) {
1259 orbi->data_lo += ccb->csio.dxfer_len;
1260 orbi->orb4.data_size -= ccb->csio.dxfer_len;
1264 if (ccb_dir == CAM_DIR_NONE) {
1265 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1268 sbp_targ_send_status(orbi, ccb);
1271 ccb->ccb_h.status = CAM_REQ_CMP;
1276 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
1277 if (status != CAM_REQ_CMP) {
1278 ccb->ccb_h.status = status;
1282 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
1284 ccb->ccb_h.status = CAM_REQ_INPROG;
/* A starved login was waiting for an ATIO — resume its ORB fetch. */
1285 if ((lstate->flags & F_ATIO_STARVED) != 0) {
1286 struct sbp_targ_login *login;
1289 printf("%s: new atio arrived\n", __func__);
1290 lstate->flags &= ~F_ATIO_STARVED;
1291 STAILQ_FOREACH(login, &lstate->logins, link)
1292 if ((login->flags & F_ATIO_STARVED) != 0) {
1293 login->flags &= ~F_ATIO_STARVED;
1294 sbp_targ_fetch_orb(lstate->sc,
1296 login->last_hi, login->last_lo,
1301 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */
1302 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
1303 if (status != CAM_REQ_CMP) {
1304 ccb->ccb_h.status = status;
1308 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
1310 ccb->ccb_h.status = CAM_REQ_INPROG;
1311 sbp_targ_send_lstate_events(sc, lstate);
1314 sbp_targ_en_lun(sc, ccb);
1319 struct ccb_pathinq *cpi = &ccb->cpi;
1321 cpi->version_num = 1; /* XXX??? */
1322 cpi->hba_inquiry = PI_TAG_ABLE;
1323 cpi->target_sprt = PIT_PROCESSOR
1326 cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */
1327 cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE;
1328 cpi->hba_eng_cnt = 0;
1329 cpi->max_target = 7; /* XXX */
1330 cpi->max_lun = MAX_LUN - 1;
1331 cpi->initiator_id = 7; /* XXX */
1332 cpi->bus_id = sim->bus_id;
/* S400 = 400 Mb/s expressed in KB/s for CAM. */
1333 cpi->base_transfer_speed = 400 * 1000 / 8;
1334 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1335 strncpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN);
1336 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
1337 cpi->unit_number = sim->unit_number;
1339 cpi->ccb_h.status = CAM_REQ_CMP;
1345 union ccb *accb = ccb->cab.abort_ccb;
1347 switch (accb->ccb_h.func_code) {
1348 case XPT_ACCEPT_TARGET_IO:
1349 case XPT_IMMEDIATE_NOTIFY:
1350 ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb);
1352 case XPT_CONT_TARGET_IO:
1354 ccb->ccb_h.status = CAM_UA_ABORT;
1357 printf("%s: aborting unknown function %d\n",
1358 __func__, accb->ccb_h.func_code);
1359 ccb->ccb_h.status = CAM_REQ_INVALID;
1365 #ifdef CAM_NEW_TRAN_CODE
1366 case XPT_SET_TRAN_SETTINGS:
1367 ccb->ccb_h.status = CAM_REQ_INVALID;
1370 case XPT_GET_TRAN_SETTINGS:
1372 struct ccb_trans_settings *cts = &ccb->cts;
1373 struct ccb_trans_settings_scsi *scsi =
1374 &cts->proto_specific.scsi;
1375 struct ccb_trans_settings_spi *spi =
1376 &cts->xport_specific.spi;
1378 cts->protocol = PROTO_SCSI;
1379 cts->protocol_version = SCSI_REV_2;
1380 cts->transport = XPORT_FW; /* should have a FireWire */
1381 cts->transport_version = 2;
1382 spi->valid = CTS_SPI_VALID_DISC;
1383 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
1384 scsi->valid = CTS_SCSI_VALID_TQ;
1385 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1387 printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
1388 device_get_nameunit(sc->fd.dev),
1389 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1391 cts->ccb_h.status = CAM_REQ_CMP;
1398 printf("%s: unknown function 0x%x\n",
1399 __func__, ccb->ccb_h.func_code);
1400 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/* Locked wrapper around sbp_targ_action1 (lock/unlock lines elided). */
1408 sbp_targ_action(struct cam_sim *sim, union ccb *ccb)
1413 sbp_targ_action1(sim, ccb);
/* CAM poll hook — intentionally empty (interrupt-driven driver). */
1418 sbp_targ_poll(struct cam_sim *sim)
/*
 * Completion handler for the asynchronous read of a command ORB started
 * by sbp_targ_fetch_orb(FETCH_CMD).  On success, byte-swaps the ORB
 * header (leaving the embedded SCSI CDB untouched), fills the
 * pre-allocated ATIO and hands it to CAM via xpt_done(); prefetches the
 * next linked ORB when the ORB's next_ORB pointer is valid.
 */
1425 sbp_targ_cmd_handler(struct fw_xfer *xfer)
1430 struct orb_info *orbi;
1431 struct ccb_accept_tio *atio;
1435 orbi = (struct orb_info *)xfer->sc;
/*
 * Transfer failed: report transport failure / dead agent state to the
 * initiator's status FIFO and abort the ORBs queued behind this one.
 */
1436 if (xfer->resp != 0) {
1437 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1438 orbi->status.resp = SBP_TRANS_FAIL;
1439 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1440 orbi->status.dead = 1;
1441 orbi->status.len = 1;
1442 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1444 sbp_targ_status_FIFO(orbi,
1445 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1449 fp = &xfer->recv.hdr;
/* The ORB was aborted while the fetch was in flight: complete the
 * ATIO as aborted and discard the orb_info. */
1453 if (orbi->state == ORBI_STATUS_ABORTED) {
1454 printf("%s: aborted\n", __func__);
1455 sbp_targ_remove_orb_info(orbi->login, orbi);
1456 free(orbi, M_SBP_TARG);
1457 atio->ccb_h.status = CAM_REQ_ABORTED;
1459 xpt_done((union ccb*)atio);
1460 SBP_UNLOCK(orbi->sc);
1463 orbi->state = ORBI_STATUS_ATIO;
1466 /* swap payload except SCSI command */
1467 for (i = 0; i < 5; i++)
1468 orb[i] = ntohl(orb[i]);
/* orb[4] carries the command-block ORB control fields; only rq_fmt 0
 * (SBP-2 format) is supported. */
1470 orb4 = (struct corb4 *)&orb[4];
1471 if (orb4->rq_fmt != 0) {
1473 printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt);
/* Populate the accepted target I/O; the ORB's low address doubles as
 * the tag and the login id identifies the initiator. */
1476 atio->ccb_h.target_id = 0; /* XXX */
1477 atio->ccb_h.target_lun = orbi->login->lstate->lun;
1478 atio->sense_len = 0;
1479 atio->tag_action = MSG_SIMPLE_TASK;
1480 atio->tag_id = orbi->orb_lo;
1481 atio->init_id = orbi->login->id;
1483 atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
/* The SCSI CDB starts at quadlet 5 of the ORB. */
1484 bytes = (u_char *)&orb[5];
1486 printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1487 __func__, (void *)atio,
1488 bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
1489 bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]);
/* CDB length is derived from the opcode's group code (top 3 bits). */
1490 switch (bytes[0] >> 5) {
1506 /* Only copy the opcode. */
1508 printf("Reserved or VU command code type encountered\n");
1512 memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len);
1514 atio->ccb_h.status |= CAM_CDB_RECVD;
/*
 * High bit of next_ORB clear => a valid next ORB pointer exists;
 * prefetch it and tell the initiator more status will follow.
 * NOTE(review): (1<<31) shifts into the sign bit of int (UB in C);
 * sbp_targ_pointer_handler uses (1U << 31) -- consider matching.
 */
1517 if ((orb[0] & (1<<31)) == 0) {
1519 printf("%s: fetch next orb\n", __func__);
1520 orbi->status.src = SRC_NEXT_EXISTS;
1521 sbp_targ_fetch_orb(orbi->sc, orbi->fwdev,
1522 orb[0], orb[1], orbi->login, FETCH_CMD);
1524 orbi->status.src = SRC_NO_NEXT;
1525 orbi->login->flags &= ~F_LINK_ACTIVE;
/* Remember the data descriptor address for the data phase. */
1528 orbi->data_hi = orb[2];
1529 orbi->data_lo = orb[3];
1533 xpt_done((union ccb*)atio);
1534 SBP_UNLOCK(orbi->sc);
/*
 * Find the existing login for (fwdev, lun), or allocate a new one.
 * A new login takes the first free slot in sc->logins[] (bounded by
 * MAX_LOGINS).  last_hi/last_lo are initialized to the all-ones
 * sentinel meaning "no previous ORB pointer" (checked by the DOORBELL
 * handler).  Returns NULL on slot exhaustion or allocation failure
 * (return path not visible in this fragment).
 */
static struct sbp_targ_login *
sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
1543 struct sbp_targ_lstate *lstate;
1544 struct sbp_targ_login *login;
1547 lstate = sc->lstate[lun];
/* Reuse an existing login from the same device. */
1549 STAILQ_FOREACH(login, &lstate->logins, link)
1550 if (login->fwdev == fwdev)
/* Find a free login-id slot. */
1553 for (i = 0; i < MAX_LOGINS; i++)
1554 if (sc->logins[i] == NULL)
1557 printf("%s: increase MAX_LOGIN\n", __func__);
1561 login = (struct sbp_targ_login *)malloc(
1562 sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO);
1564 if (login == NULL) {
1565 printf("%s: malloc failed\n", __func__);
1570 login->fwdev = fwdev;
1571 login->lstate = lstate;
1572 login->last_hi = 0xffff;
1573 login->last_lo = 0xffffffff;
1574 login->hold_sec = 1;
1575 STAILQ_INIT(&login->orbs);
1576 CALLOUT_INIT(&login->hold_callout);
1577 sc->logins[i] = login;
/*
 * Completion handler for the 8-quadlet management-ORB read started by
 * sbp_targ_fetch_orb(FETCH_MGM).  Byte-swaps the ORB, dispatches on the
 * management function (LOGIN / RECONNECT / LOGOUT / ...), then posts a
 * status block to the FIFO address given in orb[6]/orb[7].
 */
1582 sbp_targ_mgm_handler(struct fw_xfer *xfer)
1584 struct sbp_targ_lstate *lstate;
1585 struct sbp_targ_login *login;
1589 struct orb_info *orbi;
1592 orbi = (struct orb_info *)xfer->sc;
/* Fetch failed: report transport failure to the status FIFO. */
1593 if (xfer->resp != 0) {
1594 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1595 orbi->status.resp = SBP_TRANS_FAIL;
1596 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1597 orbi->status.dead = 1;
1598 orbi->status.len = 1;
1599 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1601 sbp_targ_status_FIFO(orbi,
1602 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0);
1606 fp = &xfer->recv.hdr;
/* Management ORBs are fully big-endian; swap all 8 quadlets. */
1610 for (i = 0; i < 8; i++) {
1611 orb[i] = ntohl(orb[i]);
1613 orb4 = (struct morb4 *)&orb[4];
1615 printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]);
1617 orbi->status.src = SRC_NO_NEXT;
/* The ORB_FUN_* case labels are full 32-bit values, hence the shift. */
1619 switch (orb4->fun << 16) {
/* LOGIN: validate lun, enforce exclusivity, allocate a login. */
1622 int exclusive = 0, lun;
1624 if (orb[4] & ORB_EXV)
1628 lstate = orbi->sc->lstate[lun];
/* Deny if lun is invalid/unexported or already held by another device. */
1630 if (lun >= MAX_LUN || lstate == NULL ||
1632 STAILQ_FIRST(&lstate->logins) != NULL &&
1633 STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)
1636 orbi->status.dead = 1;
1637 orbi->status.status = STATUS_ACCESS_DENY;
1638 orbi->status.len = 1;
1642 /* allocate login */
1643 login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun);
1644 if (login == NULL) {
1645 printf("%s: sbp_targ_get_login failed\n",
1647 orbi->status.dead = 1;
1648 orbi->status.status = STATUS_RES_UNAVAIL;
1649 orbi->status.len = 1;
1652 printf("%s: login id=%d\n", __func__, login->id);
/* Record the initiator's status FIFO and build the login response,
 * which binds the command agent at SBP_TARG_BIND_*(login->id). */
1654 login->fifo_hi = orb[6];
1655 login->fifo_lo = orb[7];
1656 login->loginres.len = htons(sizeof(uint32_t) * 4);
1657 login->loginres.id = htons(login->id);
1658 login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI);
1659 login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id));
1660 login->loginres.recon_hold = htons(login->hold_sec);
1662 STAILQ_INSERT_TAIL(&lstate->logins, login, link);
/* Write the login response block back to the address in orb[2]/orb[3]. */
1663 fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
1664 sizeof(struct sbp_login_res), (void *)&login->loginres,
1665 fw_asy_callback_free);
1666 /* XXX return status after loginres is successfully written */
/* RECONNECT: resume an existing login after bus reset. */
1670 login = orbi->sc->logins[orb4->id];
1671 if (login != NULL && login->fwdev == orbi->fwdev) {
1672 login->flags &= ~F_HOLD;
1673 callout_stop(&login->hold_callout);
1674 printf("%s: reconnected id=%d\n",
1675 __func__, login->id);
1677 orbi->status.dead = 1;
1678 orbi->status.status = STATUS_ACCESS_DENY;
1679 printf("%s: reconnection faild id=%d\n",
1680 __func__, orb4->id);
/*
 * LOGOUT: release the login.
 * NOTE(review): unlike the RECONNECT path above, login is dereferenced
 * without a NULL check -- a logout with a stale id could fault; confirm
 * against a context with the full switch visible.
 */
1684 login = orbi->sc->logins[orb4->id];
1685 if (login->fwdev != orbi->fwdev) {
1686 printf("%s: wrong initiator\n", __func__);
1689 sbp_targ_dealloc_login(login);
/* Remaining management functions are unimplemented. */
1692 printf("%s: %s not implemented yet\n",
1693 __func__, orb_fun_name[orb4->fun]);
/* Common exit: push the status block to the FIFO named in the ORB. */
1696 orbi->status.len = 1;
1697 sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0);
/*
 * Completion handler for the 2-quadlet ORB-pointer read started by
 * sbp_targ_fetch_orb(FETCH_POINTER).  Validates the pointer and chains
 * into a FETCH_CMD of the command ORB it addresses, then frees the
 * temporary orb_info.
 */
1703 sbp_targ_pointer_handler(struct fw_xfer *xfer)
1705 struct orb_info *orbi;
1706 uint32_t orb0, orb1;
1708 orbi = (struct orb_info *)xfer->sc;
1709 if (xfer->resp != 0) {
1710 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1714 orb0 = ntohl(orbi->orb[0]);
1715 orb1 = ntohl(orbi->orb[1]);
/* High bit set marks a null/invalid ORB pointer. */
1716 if ((orb0 & (1U << 31)) != 0) {
1717 printf("%s: invalid pointer\n", __func__);
/* Low 16 bits of orb0 are the address-hi part of the ORB pointer
 * (truncating cast); orb1 is address-lo. */
1720 sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev,
1721 (uint16_t)orb0, orb1, orbi->login, FETCH_CMD);
1723 free(orbi, M_SBP_TARG);
/*
 * Start an asynchronous read of an ORB at orb_hi:orb_lo on the
 * initiator, dispatching the completion to one of three handlers
 * depending on mode: management ORB (sbp_targ_mgm_handler), command
 * ORB (sbp_targ_cmd_handler, which first needs a free ATIO), or a
 * bare ORB pointer (sbp_targ_pointer_handler).  The case labels for
 * the mode switch are not visible in this fragment.
 */
1729 sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
1730 uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login,
1733 struct orb_info *orbi;
1736 printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo);
1737 orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO);
1739 printf("%s: malloc failed\n", __func__);
1743 orbi->fwdev = fwdev;
1744 orbi->login = login;
1745 orbi->orb_hi = orb_hi;
1746 orbi->orb_lo = orb_lo;
/* Pre-fill the status block's ORB address in wire byte order. */
1747 orbi->status.orb_hi = htons(orb_hi);
1748 orbi->status.orb_lo = htonl(orb_lo);
1749 orbi->page_table = NULL;
/* Management ORB: 8 quadlets, completion in sbp_targ_mgm_handler. */
1753 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1754 sizeof(uint32_t) * 8, &orbi->orb[0],
1755 sbp_targ_mgm_handler);
/* Command ORB: remember the fetch address for DOORBELL replay and
 * reserve a free ATIO before issuing the read. */
1758 orbi->state = ORBI_STATUS_FETCH;
1759 login->last_hi = orb_hi;
1760 login->last_lo = orb_lo;
1761 login->flags |= F_LINK_ACTIVE;
1764 orbi->atio = (struct ccb_accept_tio *)
1765 SLIST_FIRST(&login->lstate->accept_tios);
/* No ATIO available: mark both lstate and login starved so the fetch
 * can be retried when CAM queues a new ATIO. */
1766 if (orbi->atio == NULL) {
1768 printf("%s: no free atio\n", __func__);
1769 login->lstate->flags |= F_ATIO_STARVED;
1770 login->flags |= F_ATIO_STARVED;
1773 login->fwdev = fwdev;
1777 SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
1778 STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
1780 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1781 sizeof(uint32_t) * 8, &orbi->orb[0],
1782 sbp_targ_cmd_handler);
/* ORB pointer: only 2 quadlets, completion in sbp_targ_pointer_handler. */
1785 orbi->state = ORBI_STATUS_POINTER;
1786 login->flags |= F_LINK_ACTIVE;
1787 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1788 sizeof(uint32_t) * 2, &orbi->orb[0],
1789 sbp_targ_pointer_handler);
1792 printf("%s: invalid mode %d\n", __func__, mode);
/*
 * Completion callback for the write response sent by sbp_targ_recv().
 * Recycles the fw_xfer: unloads it, restores the receive payload size
 * and handler, and returns it to the bind's xfer free list.
 */
1797 sbp_targ_resp_callback(struct fw_xfer *xfer)
1799 struct sbp_targ_softc *sc;
1803 printf("%s: xfer=%p\n", __func__, xfer);
1804 sc = (struct sbp_targ_softc *)xfer->sc;
1805 fw_xfer_unload(xfer);
1806 xfer->recv.pay_len = SBP_TARG_RECV_LEN;
1807 xfer->hand = sbp_targ_recv;
1809 STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link);
/*
 * Handle a write to a per-login command agent register block.  Validates
 * the login id and the writing device, then dispatches on the register
 * offset: ORB_POINTER starts a command fetch, AGENT_RESET clears the
 * last-pointer state and aborts queued ORBs, DOORBELL re-fetches from
 * the last ORB pointer, AGENT_STATE and UNSOLICITED_STATE_ENABLE are
 * accepted but ignored.  Returns a firewire response rtcode.
 */
1814 sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
1817 struct sbp_targ_login *login;
1818 struct sbp_targ_softc *sc;
/* Reject out-of-range login ids before indexing sc->logins[]. */
1821 if (login_id < 0 || login_id >= MAX_LOGINS)
1822 return (RESP_ADDRESS_ERROR);
1824 sc = (struct sbp_targ_softc *)xfer->sc;
1825 login = sc->logins[login_id];
1827 return (RESP_ADDRESS_ERROR);
/* Only the device that owns the login may touch its agent registers. */
1829 if (login->fwdev != fwdev) {
1831 return (RESP_ADDRESS_ERROR);
1835 case 0x08: /* ORB_POINTER */
1837 printf("%s: ORB_POINTER(%d)\n", __func__, login_id);
1838 if ((login->flags & F_LINK_ACTIVE) != 0) {
1840 printf("link active (ORB_POINTER)\n");
/* The written payload is the 64-bit address of the first ORB. */
1843 sbp_targ_fetch_orb(sc, fwdev,
1844 ntohl(xfer->recv.payload[0]),
1845 ntohl(xfer->recv.payload[1]),
1848 case 0x04: /* AGENT_RESET */
1850 printf("%s: AGENT RESET(%d)\n", __func__, login_id);
/* Reset to the "no previous pointer" sentinel and drop queued ORBs. */
1851 login->last_hi = 0xffff;
1852 login->last_lo = 0xffffffff;
1853 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
1855 case 0x10: /* DOORBELL */
1857 printf("%s: DOORBELL(%d)\n", __func__, login_id);
/* DOORBELL is meaningless without a previously fetched pointer. */
1858 if (login->last_hi == 0xffff &&
1859 login->last_lo == 0xffffffff) {
1860 printf("%s: no previous pointer(DOORBELL)\n",
1864 if ((login->flags & F_LINK_ACTIVE) != 0) {
1866 printf("link active (DOORBELL)\n");
/* Re-read the pointer at the last fetched ORB address. */
1869 sbp_targ_fetch_orb(sc, fwdev,
1870 login->last_hi, login->last_lo,
1871 login, FETCH_POINTER);
1873 case 0x00: /* AGENT_STATE */
1874 printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id);
1876 case 0x14: /* UNSOLICITED_STATE_ENABLE */
1877 printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n",
1878 __func__, login_id);
1881 printf("%s: invalid register %d(%d)\n",
1882 __func__, reg, login_id);
1883 rtcode = RESP_ADDRESS_ERROR;
/*
 * Handle a write to the management agent register.  Only block writes
 * (WREQB) are accepted; the payload carries the 64-bit address of the
 * management ORB, which is then fetched asynchronously.
 */
1890 sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev)
1892 struct sbp_targ_softc *sc;
1895 sc = (struct sbp_targ_softc *)xfer->sc;
1897 fp = &xfer->recv.hdr;
1898 if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
1899 printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode);
1900 return (RESP_TYPE_ERROR);
/* Kick off the 8-quadlet management-ORB read (mode argument is on a
 * line not visible in this fragment; presumably FETCH_MGM). */
1903 sbp_targ_fetch_orb(sc, fwdev,
1904 ntohl(xfer->recv.payload[0]),
1905 ntohl(xfer->recv.payload[1]),
/*
 * Top-level receive handler for writes into the driver's bound address
 * range.  Resolves the source node, routes the request to the
 * management agent (SBP_TARG_BIND_LO(-1)) or to a per-login command
 * agent (SBP_TARG_LOGIN_ID of the destination offset), then sends a
 * write-response packet carrying the resulting rtcode.
 */
1912 sbp_targ_recv(struct fw_xfer *xfer)
1914 struct fw_pkt *fp, *sfp;
1915 struct fw_device *fwdev;
1918 struct sbp_targ_softc *sc;
1921 sc = (struct sbp_targ_softc *)xfer->sc;
1922 fp = &xfer->recv.hdr;
/* Source node id is the low 6 bits of the src field. */
1923 fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f);
1924 if (fwdev == NULL) {
1925 printf("%s: cannot resolve nodeid=%d\n",
1926 __func__, fp->mode.wreqb.src & 0x3f);
1927 rtcode = RESP_TYPE_ERROR; /* XXX */
/* Dispatch on the destination offset within the bound range. */
1930 lo = fp->mode.wreqb.dest_lo;
1932 if (lo == SBP_TARG_BIND_LO(-1))
1933 rtcode = sbp_targ_mgm(xfer, fwdev);
1934 else if (lo >= SBP_TARG_BIND_LO(0))
1935 rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo),
1938 rtcode = RESP_ADDRESS_ERROR;
1942 printf("%s: rtcode = %d\n", __func__, rtcode);
/* Build and send the write response; the xfer is recycled in
 * sbp_targ_resp_callback() once the response completes. */
1943 sfp = &xfer->send.hdr;
1944 xfer->send.spd = FWSPD_S400;
1945 xfer->hand = sbp_targ_resp_callback;
1946 sfp->mode.wres.dst = fp->mode.wreqb.src;
1947 sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt;
1948 sfp->mode.wres.tcode = FWTCODE_WRES;
1949 sfp->mode.wres.rtcode = rtcode;
1950 sfp->mode.wres.pri = 0;
1952 fw_asyreq(xfer->fc, -1, xfer);
/*
 * Newbus attach: initialize the softc and its mutex, create and
 * register the CAM SIM plus a wildcard path, then bind the SBP target
 * address range on the firewire bus and pre-allocate receive xfers for
 * it.  Error paths unwind the SIM/devq (partially visible below).
 */
1957 sbp_targ_attach(device_t dev)
1959 struct sbp_targ_softc *sc;
1960 struct cam_devq *devq;
1961 struct firewire_comm *fc;
1963 sc = (struct sbp_targ_softc *) device_get_softc(dev);
1964 bzero((void *)sc, sizeof(struct sbp_targ_softc));
1966 mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF);
/* Parent firewire bus handle arrives via ivars. */
1967 sc->fd.fc = fc = device_get_ivars(dev);
1969 sc->fd.post_explore = (void *) sbp_targ_post_explore;
1970 sc->fd.post_busreset = (void *) sbp_targ_post_busreset;
1972 devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS);
1976 sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll,
1977 "sbp_targ", sc, device_get_unit(dev), &sc->mtx,
1978 /*untagged*/ 1, /*tagged*/ 1, devq);
1979 if (sc->sim == NULL) {
1980 cam_simq_free(devq);
1985 if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS)
1988 if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim),
1989 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1990 xpt_bus_deregister(cam_sim_path(sc->sim));
/* Bind the SBP target register window and pre-stock receive xfers. */
1995 sc->fwb.start = SBP_TARG_BIND_START;
1996 sc->fwb.end = SBP_TARG_BIND_END;
1998 /* pre-allocate xfer */
1999 STAILQ_INIT(&sc->fwb.xferlist);
2000 fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG,
2001 /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */,
2002 fc, (void *)sc, sbp_targ_recv);
2003 fw_bindadd(fc, &sc->fwb);
/* Error unwind: free SIM together with its devq. */
2008 cam_sim_free(sc->sim, /*free_devq*/TRUE);
/*
 * Newbus detach: tear down the CAM path/bus/SIM, free all per-lun
 * state (including the black-hole lun, if configured), remove the
 * firewire address binding and its xfers, and destroy the mutex.
 */
2013 sbp_targ_detach(device_t dev)
2015 struct sbp_targ_softc *sc;
2016 struct sbp_targ_lstate *lstate;
2019 sc = (struct sbp_targ_softc *)device_get_softc(dev);
/* Stop reacting to bus resets before dismantling state. */
2020 sc->fd.post_busreset = NULL;
2023 xpt_free_path(sc->path);
2024 xpt_bus_deregister(cam_sim_path(sc->sim));
2026 cam_sim_free(sc->sim, /*free_devq*/TRUE);
/* Release every exported lun's state. */
2028 for (i = 0; i < MAX_LUN; i++) {
2029 lstate = sc->lstate[i];
2030 if (lstate != NULL) {
2031 xpt_free_path(lstate->path);
2032 free(lstate, M_SBP_TARG);
2035 if (sc->black_hole != NULL) {
2036 xpt_free_path(sc->black_hole->path);
2037 free(sc->black_hole, M_SBP_TARG);
2040 fw_bindremove(sc->fd.fc, &sc->fwb);
2041 fw_xferlist_remove(&sc->fwb.xferlist);
2043 mtx_destroy(&sc->mtx);
/* Newbus glue: devclass, method table, driver declaration and module
 * registration for the sbp_targ target-mode driver on the firewire bus. */
2048 static devclass_t sbp_targ_devclass;
2050 static device_method_t sbp_targ_methods[] = {
2051 /* device interface */
2052 DEVMETHOD(device_identify, sbp_targ_identify),
2053 DEVMETHOD(device_probe, sbp_targ_probe),
2054 DEVMETHOD(device_attach, sbp_targ_attach),
2055 DEVMETHOD(device_detach, sbp_targ_detach),
2059 static driver_t sbp_targ_driver = {
2062 sizeof(struct sbp_targ_softc),
/* Depends on the firewire and cam kernel modules. */
2065 DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0);
2066 MODULE_VERSION(sbp_targ, 1);
2067 MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1);
2068 MODULE_DEPEND(sbp_targ, cam, 1, 1, 1);