3 * Hidetoshi Shimokawa. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
16 * This product includes software developed by Hidetoshi Shimokawa.
18 * 4. Neither the name of the author nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/systm.h>
40 #include <sys/sysctl.h>
41 #include <sys/types.h>
43 #include <sys/malloc.h>
44 #include <sys/endian.h>
47 #include <machine/bus.h>
49 #include <dev/firewire/firewire.h>
50 #include <dev/firewire/firewirereg.h>
51 #include <dev/firewire/iec13213.h>
52 #include <dev/firewire/sbp.h>
53 #include <dev/firewire/fwmem.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_xpt_sim.h>
59 #include <cam/cam_debug.h>
60 #include <cam/cam_periph.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
64 #define SBP_TARG_RECV_LEN 8
65 #define MAX_INITIATORS 8
70 * management/command block agent registers
72 * BASE 0xffff f001 0000 management port
73 * BASE 0xffff f001 0020 command port for login id 0
74 * BASE 0xffff f001 0040 command port for login id 1
77 #define SBP_TARG_MGM 0x10000 /* offset from 0xffff f000 000 */
78 #define SBP_TARG_BIND_HI 0xffff
79 #define SBP_TARG_BIND_LO(l) (0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1))
80 #define SBP_TARG_BIND_START (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
82 #define SBP_TARG_BIND_END (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
83 SBP_TARG_BIND_LO(MAX_LOGINS))
84 #define SBP_TARG_LOGIN_ID(lo) (((lo) - SBP_TARG_BIND_LO(0))/0x20)
88 #define FETCH_POINTER 2
90 #define F_LINK_ACTIVE (1 << 0)
91 #define F_ATIO_STARVED (1 << 1)
92 #define F_LOGIN (1 << 2)
93 #define F_HOLD (1 << 3)
94 #define F_FREEZED (1 << 4)
96 static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");
100 SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0,
101 "SBP target mode debug flag");
103 struct sbp_targ_login {
104 struct sbp_targ_lstate *lstate;
105 struct fw_device *fwdev;
106 struct sbp_login_res loginres;
111 STAILQ_HEAD(, orb_info) orbs;
112 STAILQ_ENTRY(sbp_targ_login) link;
117 struct callout hold_callout;
120 struct sbp_targ_lstate {
122 struct sbp_targ_softc *sc;
123 struct cam_path *path;
124 struct ccb_hdr_slist accept_tios;
125 struct ccb_hdr_slist immed_notifies;
126 struct crom_chunk model;
128 STAILQ_HEAD(, sbp_targ_login) logins;
131 struct sbp_targ_softc {
132 struct firewire_dev_comm fd;
134 struct cam_path *path;
138 struct crom_chunk unit;
139 struct sbp_targ_lstate *lstate[MAX_LUN];
140 struct sbp_targ_lstate *black_hole;
141 struct sbp_targ_login *logins[MAX_LOGINS];
144 #define SBP_LOCK(sc) mtx_lock(&(sc)->mtx)
145 #define SBP_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
148 #if BYTE_ORDER == BIG_ENDIAN
155 page_table_present:1,
159 uint32_t data_size:16,
161 page_table_present:1,
172 #if BYTE_ORDER == BIG_ENDIAN
189 * Unrestricted page table format
190 * states that the segment length
191 * and high base addr are in the first
192 * 32 bits and the base low is in
195 struct unrestricted_page_table_fmt {
196 uint16_t segment_len;
197 uint16_t segment_base_high;
198 uint32_t segment_base_low;
203 struct sbp_targ_softc *sc;
204 struct fw_device *fwdev;
205 struct sbp_targ_login *login;
207 struct ccb_accept_tio *atio;
209 #define ORBI_STATUS_NONE 0
210 #define ORBI_STATUS_FETCH 1
211 #define ORBI_STATUS_ATIO 2
212 #define ORBI_STATUS_CTIO 3
213 #define ORBI_STATUS_STATUS 4
214 #define ORBI_STATUS_POINTER 5
215 #define ORBI_STATUS_ABORTED 7
222 STAILQ_ENTRY(orb_info) link;
224 struct unrestricted_page_table_fmt *page_table;
225 struct unrestricted_page_table_fmt *cur_pte;
226 struct unrestricted_page_table_fmt *last_pte;
227 uint32_t last_block_read;
228 struct sbp_status status;
231 static char *orb_fun_name[] = {
235 static void sbp_targ_recv(struct fw_xfer *);
236 static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
237 uint16_t, uint32_t, struct sbp_targ_login *, int);
238 static void sbp_targ_xfer_pt(struct orb_info *);
239 static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);
/* Bus identify hook: add a single sbp_targ child under the FireWire bus. */
242 sbp_targ_identify(driver_t *driver, device_t parent)
244 BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent));
/*
 * Probe: attach only to the instance whose unit matches the parent's unit
 * (one target-mode instance per FireWire controller, presumably).
 */
248 sbp_targ_probe(device_t dev)
252 pa = device_get_parent(dev);
253 if (device_get_unit(dev) != device_get_unit(pa)) {
257 device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
/*
 * Tear down a login: free every ORB still queued on it, stop the
 * reconnection-hold callout, unlink the login from its lstate and from
 * the softc's login table, then free the login itself.
 */
262 sbp_targ_dealloc_login(struct sbp_targ_login *login)
264 struct orb_info *orbi, *next;
267 printf("%s: login = NULL\n", __func__);
/* Walk the ORB list with a saved 'next' so freeing is safe mid-iteration. */
270 for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
271 next = STAILQ_NEXT(orbi, link);
273 printf("%s: free orbi %p\n", __func__, orbi);
274 free(orbi, M_SBP_TARG);
277 callout_stop(&login->hold_callout);
279 STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
280 login->lstate->sc->logins[login->id] = NULL;
282 printf("%s: free login %p\n", __func__, login);
283 free((void *)login, M_SBP_TARG);
/*
 * Callout handler fired when a login's reconnection-hold time elapses.
 * If the login is still in HOLD state the initiator never reconnected,
 * so the login is deallocated.
 */
288 sbp_targ_hold_expire(void *arg)
290 struct sbp_targ_login *login;
292 login = (struct sbp_targ_login *)arg;
294 if (login->flags & F_HOLD) {
295 printf("%s: login_id=%d expired\n", __func__, login->id);
296 sbp_targ_dealloc_login(login);
298 printf("%s: login_id=%d not hold\n", __func__, login->id);
/*
 * Bus-reset hook: freeze the SIM queue, rebuild the SBP-2 unit directory
 * in the configuration ROM (one LUN entry per enabled lstate), abort all
 * outstanding ORBs, and start the reconnection-hold timer for each
 * active login.
 */
303 sbp_targ_post_busreset(void *arg)
305 struct sbp_targ_softc *sc;
306 struct crom_src *src;
307 struct crom_chunk *root;
308 struct crom_chunk *unit;
309 struct sbp_targ_lstate *lstate;
310 struct sbp_targ_login *login;
313 sc = (struct sbp_targ_softc *)arg;
314 src = sc->fd.fc->crom_src;
315 root = sc->fd.fc->crom_root;
/* Freeze the SIM queue once; released later in sbp_targ_post_explore(). */
319 if ((sc->flags & F_FREEZED) == 0) {
320 sc->flags |= F_FREEZED;
321 xpt_freeze_simq(sc->sim, /*count*/1);
323 printf("%s: already freezed\n", __func__);
/* Rebuild the SBP-2 unit directory entries in the config ROM. */
326 bzero(unit, sizeof(struct crom_chunk));
328 crom_add_chunk(src, root, unit, CROM_UDIR);
329 crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10);
330 crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2);
331 crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10);
332 crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI);
334 crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
335 crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);
/* Advertise one logical-unit entry per enabled LUN. */
337 for (i = 0; i < MAX_LUN; i++) {
338 lstate = sc->lstate[i];
341 crom_add_entry(unit, CSRKEY_FIRM_VER, 1);
342 crom_add_entry(unit, CROM_LUN, i);
343 crom_add_entry(unit, CSRKEY_MODEL, 1);
344 crom_add_simple_text(src, unit, &lstate->model, "TargetMode");
347 /* Process for reconnection hold time */
348 for (i = 0; i < MAX_LOGINS; i++) {
349 login = sc->logins[i];
352 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
353 if (login->flags & F_LOGIN) {
354 login->flags |= F_HOLD;
355 callout_reset(&login->hold_callout,
356 hz * login->hold_sec,
357 sbp_targ_hold_expire, (void *)login);
/*
 * Post-explore hook: bus exploration after a reset is complete, so drop
 * the freeze taken in sbp_targ_post_busreset() and let the SIM queue run.
 */
363 sbp_targ_post_explore(void *arg)
365 struct sbp_targ_softc *sc;
367 sc = (struct sbp_targ_softc *)arg;
368 sc->flags &= ~F_FREEZED;
369 xpt_release_simq(sc->sim, /*run queue*/TRUE);
/*
 * Resolve the lstate addressed by a CCB.  The wildcard target/LUN pair
 * maps to the "black hole" lstate; otherwise the CCB's LUN indexes the
 * softc's lstate table.  When notfound_failure is set, a missing lstate
 * is reported as CAM_PATH_INVALID instead of being returned as NULL.
 */
374 sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
375 struct sbp_targ_lstate **lstate, int notfound_failure)
379 /* XXX 0 is the only valid target_id */
380 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
381 ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
382 *lstate = sc->black_hole;
384 printf("setting black hole for this target id(%d)\n", ccb->ccb_h.target_id);
385 return (CAM_REQ_CMP);
388 lun = ccb->ccb_h.target_lun;
390 return (CAM_LUN_INVALID);
392 *lstate = sc->lstate[lun];
394 if (notfound_failure != 0 && *lstate == NULL) {
396 printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
397 __func__, ccb->ccb_h.target_id, lun);
398 return (CAM_PATH_INVALID);
401 printf("%s: setting lstate for tgt(%d) lun(%d)\n",
402 __func__,ccb->ccb_h.target_id, lun);
404 return (CAM_REQ_CMP);
/*
 * Handle XPT_EN_LUN: enable or disable target mode on a LUN.
 * Enable: allocate and initialize a new lstate (or the black hole for
 * the wildcard target), create its CAM path, and trigger a bus reset so
 * initiators re-read the updated config ROM.  Disable: refuse if ATIOs
 * or INOTs are still pending, otherwise free the path, dealloc every
 * login, clear the table slot, and free the lstate.
 */
408 sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
410 struct ccb_en_lun *cel = &ccb->cel;
411 struct sbp_targ_lstate *lstate;
414 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
415 if (status != CAM_REQ_CMP) {
416 ccb->ccb_h.status = status;
/* ---- enable path ---- */
420 if (cel->enable != 0) {
421 if (lstate != NULL) {
422 xpt_print_path(ccb->ccb_h.path);
423 printf("Lun already enabled\n");
424 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
427 if (cel->grp6_len != 0 || cel->grp7_len != 0) {
428 ccb->ccb_h.status = CAM_REQ_INVALID;
429 printf("Non-zero Group Codes\n");
432 lstate = (struct sbp_targ_lstate *)
433 malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO);
434 if (lstate == NULL) {
435 xpt_print_path(ccb->ccb_h.path);
436 printf("Couldn't allocate lstate\n");
437 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
441 printf("%s: malloc'd lstate %p\n",__func__, lstate);
/* Wildcard target becomes the black hole; otherwise index by LUN. */
443 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
444 sc->black_hole = lstate;
446 printf("Blackhole set due to target id == %d\n",
447 ccb->ccb_h.target_id);
449 sc->lstate[ccb->ccb_h.target_lun] = lstate;
451 memset(lstate, 0, sizeof(*lstate));
453 status = xpt_create_path(&lstate->path, /*periph*/NULL,
454 xpt_path_path_id(ccb->ccb_h.path),
455 xpt_path_target_id(ccb->ccb_h.path),
456 xpt_path_lun_id(ccb->ccb_h.path));
457 if (status != CAM_REQ_CMP) {
458 free(lstate, M_SBP_TARG);
460 xpt_print_path(ccb->ccb_h.path);
461 printf("Couldn't allocate path\n");
462 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
465 SLIST_INIT(&lstate->accept_tios);
466 SLIST_INIT(&lstate->immed_notifies);
467 STAILQ_INIT(&lstate->logins);
469 ccb->ccb_h.status = CAM_REQ_CMP;
470 xpt_print_path(ccb->ccb_h.path);
471 printf("Lun now enabled for target mode\n");
/* Initiate a bus reset so initiators pick up the new config ROM. */
473 sc->fd.fc->ibr(sc->fd.fc);
/* ---- disable path ---- */
475 struct sbp_targ_login *login, *next;
477 if (lstate == NULL) {
478 ccb->ccb_h.status = CAM_LUN_INVALID;
479 printf("Invalid lstate for this target\n");
482 ccb->ccb_h.status = CAM_REQ_CMP;
/* Cannot disable while CCBs are still queued on this lstate. */
484 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
485 printf("ATIOs pending\n");
486 ccb->ccb_h.status = CAM_REQ_INVALID;
489 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
490 printf("INOTs pending\n");
491 ccb->ccb_h.status = CAM_REQ_INVALID;
494 if (ccb->ccb_h.status != CAM_REQ_CMP) {
495 printf("status != CAM_REQ_CMP\n");
499 xpt_print_path(ccb->ccb_h.path);
500 printf("Target mode disabled\n");
501 xpt_free_path(lstate->path);
/* Dealloc every login; 'next' is saved because dealloc frees 'login'. */
503 for (login = STAILQ_FIRST(&lstate->logins); login != NULL;
505 next = STAILQ_NEXT(login, link);
506 sbp_targ_dealloc_login(login);
509 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
510 sc->black_hole = NULL;
512 sc->lstate[ccb->ccb_h.target_lun] = NULL;
514 printf("%s: free lstate %p\n", __func__, lstate);
515 free(lstate, M_SBP_TARG);
519 sc->fd.fc->ibr(sc->fd.fc);
/*
 * Deliver queued lstate events (immediate notifies) to the peripheral.
 * Currently a stub — only logs that it is unimplemented.
 */
524 sbp_targ_send_lstate_events(struct sbp_targ_softc *sc,
525 struct sbp_targ_lstate *lstate)
528 struct ccb_hdr *ccbh;
529 struct ccb_immediate_notify *inot;
531 printf("%s: not implemented yet\n", __func__);
/* Unlink an ORB from its login's queue; caller must hold the softc lock. */
537 sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login, struct orb_info *orbi)
539 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
/* Locking wrapper around the unlink: takes the softc lock around removal. */
543 sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi)
546 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
547 SBP_UNLOCK(orbi->sc);
551 * tag_id/init_id encoding
553 * tag_id and init_id have only 32 bits each.
554 * scsi_target can handle only a very limited number (up to 15) of init_ids.
555 * We have to encode a 48-bit ORB address and a 64-bit EUI-64 into these.
558 * tag_id represents the lower 32 bits of the ORB address.
559 * init_id represents the login_id.
/*
 * Look up an ORB by CCB tag: init_id selects the login (see the
 * tag_id/init_id encoding note above) and tag_id matches the low 32 bits
 * of the ORB address.  Returns NULL (presumably — tail not visible here)
 * when the login or ORB cannot be found.
 */
563 static struct orb_info *
564 sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
565 u_int tag_id, u_int init_id)
567 struct sbp_targ_login *login;
568 struct orb_info *orbi;
570 login = lstate->sc->logins[init_id];
572 printf("%s: no such login\n", __func__);
575 STAILQ_FOREACH(orbi, &login->orbs, link)
576 if (orbi->orb_lo == tag_id)
578 printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
579 __func__, tag_id, init_id);
/*
 * Abort 'orbi' and everything after it on the list.  ORBs that have not
 * progressed past ATIO are unlinked and freed immediately; ORBs further
 * along are only marked ABORTED so their in-flight completion handlers
 * can finish the cleanup.
 */
586 sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
588 struct orb_info *norbi;
591 for (; orbi != NULL; orbi = norbi) {
592 printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb);
593 norbi = STAILQ_NEXT(orbi, link);
594 if (orbi->state != ORBI_STATUS_ABORTED) {
595 if (orbi->ccb != NULL) {
596 orbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
/* Not yet handed to CAM: safe to unlink and free right here. */
600 if (orbi->state <= ORBI_STATUS_ATIO) {
601 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
603 printf("%s: free orbi %p\n", __func__, orbi);
604 free(orbi, M_SBP_TARG);
607 orbi->state = ORBI_STATUS_ABORTED;
/*
 * fw_xfer completion callback that simply releases the ORB: frees the
 * page table (if one was allocated) and the orb_info itself.
 */
614 sbp_targ_free_orbi(struct fw_xfer *xfer)
616 struct orb_info *orbi;
618 if (xfer->resp != 0) {
620 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
622 orbi = (struct orb_info *)xfer->sc;
623 if ( orbi->page_table != NULL ) {
625 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
626 free(orbi->page_table, M_SBP_TARG);
/* NULL the pointer to guard against a later double free. */
627 orbi->page_table = NULL;
630 printf("%s: free orbi %p\n", __func__, orbi);
631 free(orbi, M_SBP_TARG);
/*
 * Write the ORB's status block into the initiator's status FIFO at
 * fifo_hi:fifo_lo via an async block write.  When 'dequeue' is set the
 * ORB is removed from its login's queue first.
 */
637 sbp_targ_status_FIFO(struct orb_info *orbi,
638 uint32_t fifo_hi, uint32_t fifo_lo, int dequeue)
640 struct fw_xfer *xfer;
643 sbp_targ_remove_orb_info(orbi->login, orbi);
/* status.len counts quadlets after the header, hence the +1. */
645 xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
646 /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
647 sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
652 printf("%s: xfer == NULL\n", __func__);
657 * Generate the appropriate CAM status for the
/*
 * Translate the CCB's SCSI status into an SBP-2 status block and push it
 * to the initiator's status FIFO.  For CHECK CONDITION / BUSY /
 * CMD TERMINATED the sense data is repacked into the SBP-2 command
 * status format (sense key, ASC/ASCQ, info, FRU, sense-key-dependent
 * bytes) per the SBP-2 spec.
 */
661 sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
663 struct sbp_status *sbp_status;
665 struct orb_info *norbi;
668 sbp_status = &orbi->status;
670 orbi->state = ORBI_STATUS_STATUS;
672 sbp_status->resp = 0; /* XXX */
673 sbp_status->status = 0; /* XXX */
674 sbp_status->dead = 0; /* XXX */
676 ccb->ccb_h.status= CAM_REQ_CMP;
678 switch (ccb->csio.scsi_status) {
681 printf("%s: STATUS_OK\n", __func__);
684 case SCSI_STATUS_CHECK_COND:
686 printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
687 goto process_scsi_status;
688 case SCSI_STATUS_BUSY:
690 printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
691 goto process_scsi_status;
692 case SCSI_STATUS_CMD_TERMINATED:
695 struct sbp_cmd_status *sbp_cmd_status;
696 struct scsi_sense_data *sense;
697 int error_code, sense_key, asc, ascq;
/* Overlay the SBP-2 command status on the status block's data area. */
704 sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
705 sbp_cmd_status->status = ccb->csio.scsi_status;
706 sense = &ccb->csio.sense_data;
708 #if 0 /* XXX What we should do? */
710 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
712 norbi = STAILQ_NEXT(orbi, link);
714 printf("%s: status=%d\n", __func__, norbi->state);
715 if (norbi->ccb != NULL) {
716 norbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
717 xpt_done(norbi->ccb);
720 sbp_targ_remove_orb_info_locked(orbi->login, norbi);
721 norbi = STAILQ_NEXT(norbi, link);
722 free(norbi, M_SBP_TARG);
/* Decode the CCB's sense data into its component fields. */
727 sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
728 scsi_extract_sense_len(sense, sense_len, &error_code,
729 &sense_key, &asc, &ascq, /*show_errors*/ 0);
731 switch (error_code) {
732 case SSD_CURRENT_ERROR:
733 case SSD_DESC_CURRENT_ERROR:
734 sbp_cmd_status->sfmt = SBP_SFMT_CURR;
737 sbp_cmd_status->sfmt = SBP_SFMT_DEFER;
741 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info,
744 sbp_cmd_status->valid = 1;
/* SBP-2 carries the information field big-endian on the wire. */
747 sbp_cmd_status->info = htobe32(info_trunc);
749 sbp_cmd_status->valid = 0;
752 sbp_cmd_status->s_key = sense_key;
754 if (scsi_get_stream_info(sense, sense_len, NULL,
755 &stream_bits) == 0) {
756 sbp_cmd_status->mark =
757 (stream_bits & SSD_FILEMARK) ? 1 : 0;
758 sbp_cmd_status->eom =
759 (stream_bits & SSD_EOM) ? 1 : 0;
760 sbp_cmd_status->ill_len =
761 (stream_bits & SSD_ILI) ? 1 : 0;
763 sbp_cmd_status->mark = 0;
764 sbp_cmd_status->eom = 0;
765 sbp_cmd_status->ill_len = 0;
769 /* add_sense_code(_qual), info, cmd_spec_info */
772 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND,
773 &info, &sinfo) == 0) {
774 uint32_t cmdspec_trunc;
776 cmdspec_trunc = info;
778 sbp_cmd_status->cdb = htobe32(cmdspec_trunc);
781 sbp_cmd_status->s_code = asc;
782 sbp_cmd_status->s_qlfr = ascq;
784 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info,
786 sbp_cmd_status->fru = (uint8_t)info;
789 sbp_cmd_status->fru = 0;
792 if (scsi_get_sks(sense, sense_len, sks) == 0) {
793 bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
795 ccb->ccb_h.status |= CAM_SENT_SENSE;
801 printf("%s: unknown scsi status 0x%x\n", __func__,
/* Push the assembled status block to the initiator's FIFO. */
806 sbp_targ_status_FIFO(orbi,
807 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
811 * Invoked as a callback handler from fwmem_read/write_block
813 * Process read/write of initiator address space
814 * completion and pass status onto the backend target.
815 * If this is a partial read/write for a CCB then
816 * we decrement the orbi's refcount to indicate
817 * the status of the read/write is complete
/*
 * Completion callback for fwmem_read/write_block data transfers.
 * Each partial transfer decrements orbi->refcount; only when the last
 * one completes (refcount == 0) do we either free an aborted ORB, send
 * final status (CAM_SEND_STATUS), or complete the CCB back to CAM.
 */
820 sbp_targ_cam_done(struct fw_xfer *xfer)
822 struct orb_info *orbi;
825 orbi = (struct orb_info *)xfer->sc;
828 printf("%s: resp=%d refcount=%d\n", __func__,
829 xfer->resp, orbi->refcount);
/* Transfer failed: mark the status block dead and abort the rest. */
831 if (xfer->resp != 0) {
832 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
833 orbi->status.resp = SBP_TRANS_FAIL;
834 orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/;
835 orbi->status.dead = 1;
836 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
842 if (orbi->refcount == 0) {
844 if (orbi->state == ORBI_STATUS_ABORTED) {
846 printf("%s: orbi aborted\n", __func__);
847 sbp_targ_remove_orb_info(orbi->login, orbi);
848 if (orbi->page_table != NULL) {
850 printf("%s: free orbi->page_table %p\n",
851 __func__, orbi->page_table);
852 free(orbi->page_table, M_SBP_TARG);
855 printf("%s: free orbi %p\n", __func__, orbi);
856 free(orbi, M_SBP_TARG);
/* NOTE(review): compares status.resp against an ORBI_STATUS_* constant;
 * looks like it means "no error recorded yet" — confirm intent. */
858 } else if (orbi->status.resp == ORBI_STATUS_NONE) {
859 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
861 printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
862 sbp_targ_send_status(orbi, ccb);
865 printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
866 ccb->ccb_h.status = CAM_REQ_CMP;
870 orbi->status.len = 1;
871 sbp_targ_status_FIFO(orbi,
872 orbi->login->fifo_hi, orbi->login->fifo_lo,
874 ccb->ccb_h.status = CAM_REQ_ABORTED;
/*
 * Handle XPT_ABORT for a queued ATIO or INOT: find the CCB to abort on
 * the lstate's accept_tios or immed_notifies list, unlink it, and
 * complete it with CAM_REQ_ABORTED.  Returns CAM_PATH_INVALID when the
 * CCB is not on the list.
 */
883 sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
886 struct sbp_targ_lstate *lstate;
887 struct ccb_hdr_slist *list;
888 struct ccb_hdr *curelm;
892 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
893 if (status != CAM_REQ_CMP)
896 accb = ccb->cab.abort_ccb;
898 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
899 list = &lstate->accept_tios;
900 else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
901 list = &lstate->immed_notifies;
903 return (CAM_UA_ABORT);
/* Singly-linked list: head removal is special-cased, then walk pairs. */
905 curelm = SLIST_FIRST(list);
907 if (curelm == &accb->ccb_h) {
909 SLIST_REMOVE_HEAD(list, sim_links.sle);
911 while (curelm != NULL) {
912 struct ccb_hdr *nextelm;
914 nextelm = SLIST_NEXT(curelm, sim_links.sle);
915 if (nextelm == &accb->ccb_h) {
917 SLIST_NEXT(curelm, sim_links.sle) =
918 SLIST_NEXT(nextelm, sim_links.sle);
925 accb->ccb_h.status = CAM_REQ_ABORTED;
927 return (CAM_REQ_CMP);
929 printf("%s: not found\n", __func__);
930 return (CAM_PATH_INVALID);
934 * directly execute a read or write to the initiator
935 * address space and set hand(sbp_targ_cam_done) to
936 * process the completion from the SIM to the target.
937 * set orbi->refcount to inidicate that a read/write
938 * is inflight to/from the initiator.
/*
 * Move up to 'size' bytes between the CCB's data buffer (at 'offset')
 * and initiator memory at dst_hi:dst_lo, in chunks of at most 2048
 * bytes, invoking 'hand' (normally sbp_targ_cam_done) per chunk.
 * CAM_DIR_OUT means data flows initiator->target, so we *read* from
 * initiator memory; CAM_DIR_IN means we *write* to it.
 */
941 sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
942 uint16_t dst_hi, uint32_t dst_lo, u_int size,
943 void (*hand)(struct fw_xfer *))
945 struct fw_xfer *xfer;
946 u_int len, ccb_dir, off = 0;
950 printf("%s: offset=%d size=%d\n", __func__, offset, size);
951 ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK;
952 ptr = (char *)orbi->ccb->csio.data_ptr + offset;
955 /* XXX assume dst_lo + off doesn't overflow */
956 len = MIN(size, 2048 /* XXX */);
959 if (ccb_dir == CAM_DIR_OUT) {
961 printf("%s: CAM_DIR_OUT --> read block in?\n",__func__);
962 xfer = fwmem_read_block(orbi->fwdev,
963 (void *)orbi, /*spd*/FWSPD_S400,
964 dst_hi, dst_lo + off, len,
968 printf("%s: CAM_DIR_IN --> write block out?\n",__func__);
969 xfer = fwmem_write_block(orbi->fwdev,
970 (void *)orbi, /*spd*/FWSPD_S400,
971 dst_hi, dst_lo + off, len,
975 printf("%s: xfer == NULL", __func__);
976 /* XXX what should we do?? */
/*
 * Completion callback for the page-table fetch started in
 * sbp_targ_fetch_pt().  Handles abort and transfer-failure cleanup,
 * byte-swaps every page-table entry to host order, then kicks off the
 * actual data movement via sbp_targ_xfer_pt().
 */
984 sbp_targ_pt_done(struct fw_xfer *xfer)
986 struct orb_info *orbi;
987 struct unrestricted_page_table_fmt *pt;
990 orbi = (struct orb_info *)xfer->sc;
992 if (orbi->state == ORBI_STATUS_ABORTED) {
994 printf("%s: orbi aborted\n", __func__);
995 sbp_targ_remove_orb_info(orbi->login, orbi);
997 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
998 printf("%s: free orbi %p\n", __func__, orbi);
1000 free(orbi->page_table, M_SBP_TARG);
1001 free(orbi, M_SBP_TARG);
/* Fetch failed: report a dead status block and abort trailing ORBs. */
1006 if (xfer->resp != 0) {
1007 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1008 orbi->status.resp = SBP_TRANS_FAIL;
1009 orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/;
1010 orbi->status.dead = 1;
1011 orbi->status.len = 1;
1012 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1015 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
1017 sbp_targ_status_FIFO(orbi,
1018 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1019 free(orbi->page_table, M_SBP_TARG);
1020 orbi->page_table = NULL;
1026 * Set endianness here so we don't have
1027 * to deal with it later
1029 for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
1030 pt->segment_len = ntohs(pt->segment_len);
1032 printf("%s:segment_len = %u\n", __func__,pt->segment_len);
1033 pt->segment_base_high = ntohs(pt->segment_base_high);
1034 pt->segment_base_low = ntohl(pt->segment_base_low);
1037 sbp_targ_xfer_pt(orbi);
1040 if (orbi->refcount == 0)
1041 printf("%s: refcount == 0\n", __func__);
/*
 * Walk the (host-order) unrestricted page table and issue one
 * sbp_targ_xfer_buf() per segment until the CCB's dxfer_len is consumed
 * or the table runs out.  cur_pte persists across CTIOs so a multi-CTIO
 * transfer resumes where the previous one stopped; partially consumed
 * entries are advanced in place (base += len, len -= len).
 */
1047 static void sbp_targ_xfer_pt(struct orb_info *orbi)
1050 uint32_t res, offset, len;
1054 printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
1055 res = ccb->csio.dxfer_len;
1057 * If the page table required multiple CTIO's to
1058 * complete, then cur_pte is non NULL
1059 * and we need to start from the last position
1060 * If this is the first pass over a page table
1061 * then we just start at the beginning of the page
1064 * Parse the unrestricted page table and figure out where we need
1065 * to shove the data from this read request.
1067 for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
1068 len = MIN(orbi->cur_pte->segment_len, res);
1071 printf("%s:page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
1072 __func__, orbi->cur_pte->segment_base_high,
1073 orbi->cur_pte->segment_base_low,
1074 orbi->cur_pte->segment_len,
1076 sbp_targ_xfer_buf(orbi, offset,
1077 orbi->cur_pte->segment_base_high,
1078 orbi->cur_pte->segment_base_low,
1079 len, sbp_targ_cam_done);
1081 * If we have only written partially to
1082 * this page table, then we need to save
1083 * our position for the next CTIO. If we
1084 * have completed the page table, then we
1085 * are safe to move on to the next entry.
1087 if (len == orbi->cur_pte->segment_len) {
1090 uint32_t saved_base_low;
1092 /* Handle transfers that cross a 4GB boundary. */
1093 saved_base_low = orbi->cur_pte->segment_base_low;
1094 orbi->cur_pte->segment_base_low += len;
/* Carry into the high half if the 32-bit low address wrapped. */
1095 if (orbi->cur_pte->segment_base_low < saved_base_low)
1096 orbi->cur_pte->segment_base_high++;
1098 orbi->cur_pte->segment_len -= len;
1102 printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
1103 __func__, orbi->cur_pte->segment_base_low,
1104 orbi->cur_pte, orbi->last_block_read);
1107 printf("Warning - short pt encountered. "
1108 "Could not transfer all data.\n");
1113 * Create page table in local memory
1114 * and transfer it from the initiator
1115 * in order to know where we are supposed
/*
 * On first call for an ORB, allocate a local copy of the initiator's
 * page table (orb4.data_size entries) and start an async read of it;
 * sbp_targ_pt_done() continues from there.  On later CTIOs for the same
 * ORB the table already exists, so go straight to sbp_targ_xfer_pt().
 * On allocation/xfer failure the CCB is failed with CAM_RESRC_UNAVAIL.
 */
1120 sbp_targ_fetch_pt(struct orb_info *orbi)
1122 struct fw_xfer *xfer;
1125 * Pull in page table from initiator
1126 * and setup for data from our
1129 if (orbi->page_table == NULL) {
1130 orbi->page_table = malloc(orbi->orb4.data_size*
1131 sizeof(struct unrestricted_page_table_fmt),
1132 M_SBP_TARG, M_NOWAIT|M_ZERO);
1133 if (orbi->page_table == NULL)
1135 orbi->cur_pte = orbi->page_table;
1136 orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
1137 orbi->last_block_read = orbi->orb4.data_size;
1138 if (debug && orbi->page_table != NULL)
1139 printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
1140 __func__, orbi->page_table, orbi->orb4.data_size);
1142 xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
1143 orbi->data_hi, orbi->data_lo, orbi->orb4.data_size*
1144 sizeof(struct unrestricted_page_table_fmt),
1145 (void *)orbi->page_table, sbp_targ_pt_done);
1151 * This is a CTIO for a page table we have
1152 * already malloc'd, so just directly invoke
1153 * the xfer function on the orbi.
1155 sbp_targ_xfer_pt(orbi);
/* Error path: release the table and fail the CCB back to CAM. */
1159 orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1161 printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table);
1162 if (orbi->page_table != NULL) {
1163 free(orbi->page_table, M_SBP_TARG);
1164 orbi->page_table = NULL;
1166 xpt_done(orbi->ccb);
/*
 * Main CAM action dispatcher for the target-mode SIM.  Handles CTIOs
 * (data movement and final status back to the initiator), ATIO/INOT
 * resource registration, LUN enable/disable, path inquiry, aborts, and
 * transport-settings queries.
 */
1171 sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
1173 struct sbp_targ_softc *sc;
1174 struct sbp_targ_lstate *lstate;
1178 sc = (struct sbp_targ_softc *)cam_sim_softc(sim);
1180 status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE);
1182 switch (ccb->ccb_h.func_code) {
1183 case XPT_CONT_TARGET_IO:
1185 struct orb_info *orbi;
1188 printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n",
1189 __func__, ccb->csio.tag_id);
1191 if (status != CAM_REQ_CMP) {
1192 ccb->ccb_h.status = status;
1196 /* XXX transfer from/to initiator */
1197 orbi = sbp_targ_get_orb_info(lstate,
1198 ccb->csio.tag_id, ccb->csio.init_id);
1200 ccb->ccb_h.status = CAM_REQ_ABORTED; /* XXX */
1204 if (orbi->state == ORBI_STATUS_ABORTED) {
1206 printf("%s: ctio aborted\n", __func__);
1207 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
1209 printf("%s: free orbi %p\n", __func__, orbi);
1210 free(orbi, M_SBP_TARG);
1211 ccb->ccb_h.status = CAM_REQ_ABORTED;
1215 orbi->state = ORBI_STATUS_CTIO;
1218 ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
/* Zero-length transfers are treated as status-only CTIOs. */
1221 if (ccb->csio.dxfer_len == 0)
1222 ccb_dir = CAM_DIR_NONE;
1225 if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0)
1226 printf("%s: direction mismatch\n", __func__);
1228 /* check page table */
1229 if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) {
1231 printf("%s: page_table_present\n",
/* Only page_size 0 (the SBP-2 default) is supported here. */
1233 if (orbi->orb4.page_size != 0) {
1234 printf("%s: unsupported pagesize %d != 0\n",
1235 __func__, orbi->orb4.page_size);
1236 ccb->ccb_h.status = CAM_REQ_INVALID;
1240 sbp_targ_fetch_pt(orbi);
/* Direct (non-page-table) transfer path. */
1245 if (ccb_dir != CAM_DIR_NONE) {
1246 sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
1248 MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
1250 if ( orbi->orb4.data_size > ccb->csio.dxfer_len ) {
1251 orbi->data_lo += ccb->csio.dxfer_len;
1252 orbi->orb4.data_size -= ccb->csio.dxfer_len;
1256 if (ccb_dir == CAM_DIR_NONE) {
1257 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1260 sbp_targ_send_status(orbi, ccb);
1263 ccb->ccb_h.status = CAM_REQ_CMP;
1268 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
1269 if (status != CAM_REQ_CMP) {
1270 ccb->ccb_h.status = status;
1274 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
1276 ccb->ccb_h.status = CAM_REQ_INPROG;
/* Re-fetch ORBs for any logins that stalled waiting for an ATIO. */
1277 if ((lstate->flags & F_ATIO_STARVED) != 0) {
1278 struct sbp_targ_login *login;
1281 printf("%s: new atio arrived\n", __func__);
1282 lstate->flags &= ~F_ATIO_STARVED;
1283 STAILQ_FOREACH(login, &lstate->logins, link)
1284 if ((login->flags & F_ATIO_STARVED) != 0) {
1285 login->flags &= ~F_ATIO_STARVED;
1286 sbp_targ_fetch_orb(lstate->sc,
1288 login->last_hi, login->last_lo,
1293 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */
1294 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
1295 if (status != CAM_REQ_CMP) {
1296 ccb->ccb_h.status = status;
1300 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
1302 ccb->ccb_h.status = CAM_REQ_INPROG;
1303 sbp_targ_send_lstate_events(sc, lstate);
1306 sbp_targ_en_lun(sc, ccb);
1311 struct ccb_pathinq *cpi = &ccb->cpi;
1313 cpi->version_num = 1; /* XXX??? */
1314 cpi->hba_inquiry = PI_TAG_ABLE;
1315 cpi->target_sprt = PIT_PROCESSOR
1318 cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */
1319 cpi->hba_misc = PIM_NOINITIATOR | PIM_NOBUSRESET |
1321 cpi->hba_eng_cnt = 0;
1322 cpi->max_target = 7; /* XXX */
1323 cpi->max_lun = MAX_LUN - 1;
1324 cpi->initiator_id = 7; /* XXX */
1325 cpi->bus_id = sim->bus_id;
/* S400 nominal rate, reported in KB/s. */
1326 cpi->base_transfer_speed = 400 * 1000 / 8;
1327 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1328 strlcpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN);
1329 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
1330 cpi->unit_number = sim->unit_number;
1332 cpi->ccb_h.status = CAM_REQ_CMP;
1338 union ccb *accb = ccb->cab.abort_ccb;
1340 switch (accb->ccb_h.func_code) {
1341 case XPT_ACCEPT_TARGET_IO:
1342 case XPT_IMMEDIATE_NOTIFY:
1343 ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb);
1345 case XPT_CONT_TARGET_IO:
1347 ccb->ccb_h.status = CAM_UA_ABORT;
1350 printf("%s: aborting unknown function %d\n",
1351 __func__, accb->ccb_h.func_code);
1352 ccb->ccb_h.status = CAM_REQ_INVALID;
1358 #ifdef CAM_NEW_TRAN_CODE
1359 case XPT_SET_TRAN_SETTINGS:
1360 ccb->ccb_h.status = CAM_REQ_INVALID;
1363 case XPT_GET_TRAN_SETTINGS:
1365 struct ccb_trans_settings *cts = &ccb->cts;
1366 struct ccb_trans_settings_scsi *scsi =
1367 &cts->proto_specific.scsi;
1368 struct ccb_trans_settings_spi *spi =
1369 &cts->xport_specific.spi;
1371 cts->protocol = PROTO_SCSI;
1372 cts->protocol_version = SCSI_REV_2;
1373 cts->transport = XPORT_FW; /* should have a FireWire */
1374 cts->transport_version = 2;
1375 spi->valid = CTS_SPI_VALID_DISC;
1376 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
1377 scsi->valid = CTS_SCSI_VALID_TQ;
1378 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1380 printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
1381 device_get_nameunit(sc->fd.dev),
1382 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1384 cts->ccb_h.status = CAM_REQ_CMP;
1391 printf("%s: unknown function 0x%x\n",
1392 __func__, ccb->ccb_h.func_code);
1393 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/* SIM action entry point; delegates to sbp_targ_action1() (presumably
 * under the softc lock — the locking lines are not visible here). */
1401 sbp_targ_action(struct cam_sim *sim, union ccb *ccb)
1406 sbp_targ_action1(sim, ccb);
1411 sbp_targ_poll(struct cam_sim *sim)
/*
 * Completion handler for a fetched command ORB.  Byte-swaps the ORB
 * header, validates the request format, fills in a queued ATIO with the
 * tag/init id and CDB from the ORB, chains a fetch of the next ORB when
 * the ORB's next_ORB pointer is valid, and hands the ATIO to CAM.
 */
1418 sbp_targ_cmd_handler(struct fw_xfer *xfer)
1423 struct orb_info *orbi;
1424 struct ccb_accept_tio *atio;
1428 orbi = (struct orb_info *)xfer->sc;
/* ORB fetch failed: report a dead status and abort trailing ORBs. */
1429 if (xfer->resp != 0) {
1430 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1431 orbi->status.resp = SBP_TRANS_FAIL;
1432 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1433 orbi->status.dead = 1;
1434 orbi->status.len = 1;
1435 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1437 sbp_targ_status_FIFO(orbi,
1438 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1442 fp = &xfer->recv.hdr;
1446 if (orbi->state == ORBI_STATUS_ABORTED) {
1447 printf("%s: aborted\n", __func__);
1448 sbp_targ_remove_orb_info(orbi->login, orbi);
1449 free(orbi, M_SBP_TARG);
1450 atio->ccb_h.status = CAM_REQ_ABORTED;
1451 xpt_done((union ccb*)atio);
1454 orbi->state = ORBI_STATUS_ATIO;
1457 /* swap payload except SCSI command */
1458 for (i = 0; i < 5; i++)
1459 orb[i] = ntohl(orb[i]);
1461 orb4 = (struct corb4 *)&orb[4];
1462 if (orb4->rq_fmt != 0) {
1464 printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt);
1467 atio->ccb_h.target_id = 0; /* XXX */
1468 atio->ccb_h.target_lun = orbi->login->lstate->lun;
1469 atio->sense_len = 0;
1470 atio->tag_action = MSG_SIMPLE_TASK;
/* Encode the ORB's low address as the tag and the login id as init_id
 * (see the tag_id/init_id encoding comment earlier in the file). */
1471 atio->tag_id = orbi->orb_lo;
1472 atio->init_id = orbi->login->id;
1474 atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1475 bytes = (u_char *)&orb[5];
1477 printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1478 __func__, (void *)atio,
1479 bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
1480 bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]);
/* CDB length is derived from the SCSI group code (top 3 opcode bits). */
1481 switch (bytes[0] >> 5) {
1497 /* Only copy the opcode. */
1499 printf("Reserved or VU command code type encountered\n");
1503 memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len);
1505 atio->ccb_h.status |= CAM_CDB_RECVD;
/* next_ORB null bit clear => another ORB is linked; fetch it now. */
1508 if ((orb[0] & (1<<31)) == 0) {
1510 printf("%s: fetch next orb\n", __func__);
1511 orbi->status.src = SRC_NEXT_EXISTS;
1512 sbp_targ_fetch_orb(orbi->sc, orbi->fwdev,
1513 orb[0], orb[1], orbi->login, FETCH_CMD);
1515 orbi->status.src = SRC_NO_NEXT;
1516 orbi->login->flags &= ~F_LINK_ACTIVE;
1519 orbi->data_hi = orb[2];
1520 orbi->data_lo = orb[3];
1523 xpt_done((union ccb*)atio);
/*
 * sbp_targ_get_login --
 *	Return the existing login for (fwdev, lun), or allocate and
 *	initialize a new one in the first free slot of sc->logins[].
 *	Returns NULL when the login table is full or allocation fails.
 *	NOTE(review): some lines of this function are elided in this view.
 */
1529 static struct sbp_targ_login *
1530 sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
1532 struct sbp_targ_lstate *lstate;
1533 struct sbp_targ_login *login;
1536 lstate = sc->lstate[lun];
/* Reuse an existing login from the same initiator device, if any. */
1538 STAILQ_FOREACH(login, &lstate->logins, link)
1539 if (login->fwdev == fwdev)
/* Find a free slot in the global login table. */
1542 for (i = 0; i < MAX_LOGINS; i++)
1543 if (sc->logins[i] == NULL)
/* Table exhausted; the limiting constant is MAX_LOGINS (message fixed). */
1546 printf("%s: increase MAX_LOGINS\n", __func__);
1550 login = (struct sbp_targ_login *)malloc(
1551 sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO);
1553 if (login == NULL) {
1554 printf("%s: malloc failed\n", __func__);
1559 login->fwdev = fwdev;
1560 login->lstate = lstate;
/*
 * 0xffff:0xffffffff is the "no previous ORB pointer" sentinel that the
 * DOORBELL handler in sbp_targ_cmd() checks before re-fetching.
 */
1561 login->last_hi = 0xffff;
1562 login->last_lo = 0xffffffff;
1563 login->hold_sec = 1;
1564 STAILQ_INIT(&login->orbs);
1565 CALLOUT_INIT(&login->hold_callout);
1566 sc->logins[i] = login;
/*
 * sbp_targ_mgm_handler --
 *	Completion handler for the asynchronous read of a management ORB
 *	(scheduled by sbp_targ_fetch_orb() in FETCH_MGM mode).  Executes
 *	the requested management function (login / reconnect / logout per
 *	the printfs below) and reports status to the FIFO address carried
 *	in the ORB itself.  NOTE(review): case labels and some lines are
 *	elided in this view; only the message-string typo was changed.
 */
1571 sbp_targ_mgm_handler(struct fw_xfer *xfer)
1573 struct sbp_targ_lstate *lstate;
1574 struct sbp_targ_login *login;
1578 struct orb_info *orbi;
1581 orbi = (struct orb_info *)xfer->sc;
/* Transport failure while fetching the ORB: report a dead agent via the FIFO. */
1582 if (xfer->resp != 0) {
1583 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1584 orbi->status.resp = SBP_TRANS_FAIL;
1585 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1586 orbi->status.dead = 1;
1587 orbi->status.len = 1;
1588 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1590 sbp_targ_status_FIFO(orbi,
1591 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0);
1595 fp = &xfer->recv.hdr;
/* Byte-swap the whole 8-quadlet management ORB. */
1599 for (i = 0; i < 8; i++) {
1600 orb[i] = ntohl(orb[i]);
1602 orb4 = (struct morb4 *)&orb[4];
1604 printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]);
1606 orbi->status.src = SRC_NO_NEXT;
1608 switch (orb4->fun << 16) {
1611 int exclusive = 0, lun;
1613 if (orb[4] & ORB_EXV)
1617 lstate = orbi->sc->lstate[lun];
/* Deny login if the LUN is out of range/unconfigured or owned by another initiator. */
1619 if (lun >= MAX_LUN || lstate == NULL ||
1621 STAILQ_FIRST(&lstate->logins) != NULL &&
1622 STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)
1625 orbi->status.dead = 1;
1626 orbi->status.status = STATUS_ACCESS_DENY;
1627 orbi->status.len = 1;
1631 /* allocate login */
1632 login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun);
1633 if (login == NULL) {
1634 printf("%s: sbp_targ_get_login failed\n",
1636 orbi->status.dead = 1;
1637 orbi->status.status = STATUS_RES_UNAVAIL;
1638 orbi->status.len = 1;
1641 printf("%s: login id=%d\n", __func__, login->id);
/* Record the initiator's status FIFO address and build the login response. */
1643 login->fifo_hi = orb[6];
1644 login->fifo_lo = orb[7];
1645 login->loginres.len = htons(sizeof(uint32_t) * 4);
1646 login->loginres.id = htons(login->id);
1647 login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI);
1648 login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id));
1649 login->loginres.recon_hold = htons(login->hold_sec);
1651 STAILQ_INSERT_TAIL(&lstate->logins, login, link);
/* Write the login response block back to the initiator-supplied buffer (orb[2]:orb[3]). */
1652 fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
1653 sizeof(struct sbp_login_res), (void *)&login->loginres,
1654 fw_asy_callback_free);
1655 /* XXX return status after loginres is successfully written */
/* Reconnect: only valid from the device that owns the login. */
1659 login = orbi->sc->logins[orb4->id];
1660 if (login != NULL && login->fwdev == orbi->fwdev) {
1661 login->flags &= ~F_HOLD;
1662 callout_stop(&login->hold_callout);
1663 printf("%s: reconnected id=%d\n",
1664 __func__, login->id);
1666 orbi->status.dead = 1;
1667 orbi->status.status = STATUS_ACCESS_DENY;
1668 printf("%s: reconnection failed id=%d\n",
1669 __func__, orb4->id);
/* Logout: release the login if the request comes from its owner. */
1673 login = orbi->sc->logins[orb4->id];
1674 if (login->fwdev != orbi->fwdev) {
1675 printf("%s: wrong initiator\n", __func__);
1678 sbp_targ_dealloc_login(login);
1681 printf("%s: %s not implemented yet\n",
1682 __func__, orb_fun_name[orb4->fun]);
1685 orbi->status.len = 1;
/* Management status goes to the FIFO address named in the ORB (quadlets 6/7). */
1686 sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0);
/*
 * sbp_targ_pointer_handler --
 *	Completion handler for the 2-quadlet ORB-pointer read scheduled by
 *	sbp_targ_fetch_orb() in FETCH_POINTER mode.  On success, follows
 *	the pointer and fetches the command ORB it addresses; the orb_info
 *	used for the pointer read is freed here.
 */
1692 sbp_targ_pointer_handler(struct fw_xfer *xfer)
1694 struct orb_info *orbi;
1695 uint32_t orb0, orb1;
1697 orbi = (struct orb_info *)xfer->sc;
1698 if (xfer->resp != 0) {
1699 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
/* Pointer quadlets are big-endian on the wire. */
1703 orb0 = ntohl(orbi->orb[0]);
1704 orb1 = ntohl(orbi->orb[1]);
/* The high bit of the first quadlet must be clear for a usable pointer. */
1705 if ((orb0 & (1U << 31)) != 0) {
1706 printf("%s: invalid pointer\n", __func__);
/* Fetch the command ORB at (low 16 bits of orb0):(orb1). */
1709 sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev,
1710 (uint16_t)orb0, orb1, orbi->login, FETCH_CMD);
1712 free(orbi, M_SBP_TARG);
/*
 * sbp_targ_fetch_orb --
 *	Start an asynchronous read of an ORB at orb_hi:orb_lo on the
 *	initiator.  The mode (visible paths below; case labels are elided
 *	in this view) selects how much to read and which completion
 *	handler runs: an 8-quadlet management ORB -> sbp_targ_mgm_handler,
 *	an 8-quadlet command ORB -> sbp_targ_cmd_handler, or a 2-quadlet
 *	ORB pointer -> sbp_targ_pointer_handler.
 */
1718 sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
1719 uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login,
1722 struct orb_info *orbi;
1725 printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo);
1726 orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO);
1728 printf("%s: malloc failed\n", __func__);
1732 orbi->fwdev = fwdev;
1733 orbi->login = login;
1734 orbi->orb_hi = orb_hi;
1735 orbi->orb_lo = orb_lo;
/* Pre-fill the status block's ORB address in network byte order. */
1736 orbi->status.orb_hi = htons(orb_hi);
1737 orbi->status.orb_lo = htonl(orb_lo);
1738 orbi->page_table = NULL;
/* Management ORB: read all 8 quadlets and process in the mgm handler. */
1742 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1743 sizeof(uint32_t) * 8, &orbi->orb[0],
1744 sbp_targ_mgm_handler);
/* Command ORB: remember the address for DOORBELL re-fetch and mark the link busy. */
1747 orbi->state = ORBI_STATUS_FETCH;
1748 login->last_hi = orb_hi;
1749 login->last_lo = orb_lo;
1750 login->flags |= F_LINK_ACTIVE;
/* Pull a pre-queued ATIO to carry this command; starve-flag if none is free. */
1753 orbi->atio = (struct ccb_accept_tio *)
1754 SLIST_FIRST(&login->lstate->accept_tios);
1755 if (orbi->atio == NULL) {
1757 printf("%s: no free atio\n", __func__);
1758 login->lstate->flags |= F_ATIO_STARVED;
1759 login->flags |= F_ATIO_STARVED;
1762 login->fwdev = fwdev;
1766 SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
1767 STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
1769 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1770 sizeof(uint32_t) * 8, &orbi->orb[0],
1771 sbp_targ_cmd_handler);
/* ORB pointer: only 2 quadlets; the pointer handler chains to FETCH_CMD. */
1774 orbi->state = ORBI_STATUS_POINTER;
1775 login->flags |= F_LINK_ACTIVE;
1776 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1777 sizeof(uint32_t) * 2, &orbi->orb[0],
1778 sbp_targ_pointer_handler);
1781 printf("%s: invalid mode %d\n", __func__, mode);
/*
 * sbp_targ_resp_callback --
 *	Completion handler for the write-response packet sent from
 *	sbp_targ_recv().  Recycles the xfer: unloads it, restores the
 *	receive payload length and handler, and puts it back on the
 *	bind's xferlist so it can receive the next request.
 */
1786 sbp_targ_resp_callback(struct fw_xfer *xfer)
1788 struct sbp_targ_softc *sc;
1792 printf("%s: xfer=%p\n", __func__, xfer);
1793 sc = (struct sbp_targ_softc *)xfer->sc;
1794 fw_xfer_unload(xfer);
1795 xfer->recv.pay_len = SBP_TARG_RECV_LEN;
1796 xfer->hand = sbp_targ_recv;
1798 STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link);
/*
 * sbp_targ_cmd --
 *	Handle a write to a per-login command-agent register block.
 *	Validates the login id and the initiating device, then dispatches
 *	on the register offset (ORB_POINTER, AGENT_RESET, DOORBELL, ...).
 *	Returns a FireWire response code (RESP_* value).
 */
1803 sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
1806 struct sbp_targ_login *login;
1807 struct sbp_targ_softc *sc;
/* Reject ids outside the login table. */
1810 if (login_id < 0 || login_id >= MAX_LOGINS)
1811 return (RESP_ADDRESS_ERROR);
1813 sc = (struct sbp_targ_softc *)xfer->sc;
1814 login = sc->logins[login_id];
1816 return (RESP_ADDRESS_ERROR);
/* The register block is private to the initiator that owns the login. */
1818 if (login->fwdev != fwdev) {
1820 return (RESP_ADDRESS_ERROR);
1824 case 0x08: /* ORB_POINTER */
1826 printf("%s: ORB_POINTER(%d)\n", __func__, login_id);
/* Ignore a new pointer while a fetch is already in progress. */
1827 if ((login->flags & F_LINK_ACTIVE) != 0) {
1829 printf("link active (ORB_POINTER)\n");
/* The written value is the 48-bit ORB address: payload[0]=hi, payload[1]=lo. */
1832 sbp_targ_fetch_orb(sc, fwdev,
1833 ntohl(xfer->recv.payload[0]),
1834 ntohl(xfer->recv.payload[1]),
1837 case 0x04: /* AGENT_RESET */
1839 printf("%s: AGENT RESET(%d)\n", __func__, login_id);
/* Reset the last-ORB sentinel and abort everything queued on this login. */
1840 login->last_hi = 0xffff;
1841 login->last_lo = 0xffffffff;
1842 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
1844 case 0x10: /* DOORBELL */
1846 printf("%s: DOORBELL(%d)\n", __func__, login_id);
/* DOORBELL re-fetches from the last pointer; meaningless if none was set. */
1847 if (login->last_hi == 0xffff &&
1848 login->last_lo == 0xffffffff) {
1849 printf("%s: no previous pointer(DOORBELL)\n",
1853 if ((login->flags & F_LINK_ACTIVE) != 0) {
1855 printf("link active (DOORBELL)\n");
1858 sbp_targ_fetch_orb(sc, fwdev,
1859 login->last_hi, login->last_lo,
1860 login, FETCH_POINTER);
1862 case 0x00: /* AGENT_STATE */
1863 printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id);
1865 case 0x14: /* UNSOLICITED_STATE_ENABLE */
1866 printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n",
1867 __func__, login_id);
1870 printf("%s: invalid register %d(%d)\n",
1871 __func__, reg, login_id);
1872 rtcode = RESP_ADDRESS_ERROR;
/*
 * sbp_targ_mgm --
 *	Handle a write to the management-agent address.  Only block write
 *	requests are accepted; the written value is the address of a
 *	management ORB, which is fetched asynchronously.
 */
1879 sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev)
1881 struct sbp_targ_softc *sc;
1884 sc = (struct sbp_targ_softc *)xfer->sc;
1886 fp = &xfer->recv.hdr;
1887 if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
1888 printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode);
1889 return (RESP_TYPE_ERROR);
/* payload[0]:payload[1] is the 48-bit address of the management ORB. */
1892 sbp_targ_fetch_orb(sc, fwdev,
1893 ntohl(xfer->recv.payload[0]),
1894 ntohl(xfer->recv.payload[1]),
/*
 * sbp_targ_recv --
 *	Receive handler for the address range bound in sbp_targ_attach().
 *	Resolves the source node, routes the request to the management
 *	agent (SBP_TARG_BIND_LO(-1)) or the per-login command agent, then
 *	sends back a write-response packet carrying the resulting rtcode.
 */
1901 sbp_targ_recv(struct fw_xfer *xfer)
1903 struct fw_pkt *fp, *sfp;
1904 struct fw_device *fwdev;
1907 struct sbp_targ_softc *sc;
1910 sc = (struct sbp_targ_softc *)xfer->sc;
1911 fp = &xfer->recv.hdr;
/* Low 6 bits of the source field are the node id. */
1912 fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f);
1913 if (fwdev == NULL) {
1914 printf("%s: cannot resolve nodeid=%d\n",
1915 __func__, fp->mode.wreqb.src & 0x3f);
1916 rtcode = RESP_TYPE_ERROR; /* XXX */
/* Route by the low part of the destination address within our bind range. */
1919 lo = fp->mode.wreqb.dest_lo;
1921 if (lo == SBP_TARG_BIND_LO(-1))
1922 rtcode = sbp_targ_mgm(xfer, fwdev);
1923 else if (lo >= SBP_TARG_BIND_LO(0))
1924 rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo),
1927 rtcode = RESP_ADDRESS_ERROR;
1931 printf("%s: rtcode = %d\n", __func__, rtcode);
/* Build the write response; sbp_targ_resp_callback recycles the xfer. */
1932 sfp = &xfer->send.hdr;
1933 xfer->send.spd = FWSPD_S400;
1934 xfer->hand = sbp_targ_resp_callback;
1935 sfp->mode.wres.dst = fp->mode.wreqb.src;
1936 sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt;
1937 sfp->mode.wres.tcode = FWTCODE_WRES;
1938 sfp->mode.wres.rtcode = rtcode;
1939 sfp->mode.wres.pri = 0;
1941 fw_asyreq(xfer->fc, -1, xfer);
/*
 * sbp_targ_attach --
 *	Device attach: zero the softc, register a CAM SIM and wildcard
 *	path, bind the SBP-2 target address range on the FireWire bus,
 *	and pre-allocate receive xfers.  NOTE(review): error-path lines
 *	are elided in this view.
 */
1946 sbp_targ_attach(device_t dev)
1948 struct sbp_targ_softc *sc;
1949 struct cam_devq *devq;
1950 struct firewire_comm *fc;
1952 sc = (struct sbp_targ_softc *) device_get_softc(dev);
1953 bzero((void *)sc, sizeof(struct sbp_targ_softc));
1955 mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF);
1956 sc->fd.fc = fc = device_get_ivars(dev);
/* Hook the firewire bus callbacks for bus reset / explore completion. */
1958 sc->fd.post_explore = (void *) sbp_targ_post_explore;
1959 sc->fd.post_busreset = (void *) sbp_targ_post_busreset;
1961 devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS);
1965 sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll,
1966 "sbp_targ", sc, device_get_unit(dev), &sc->mtx,
1967 /*untagged*/ 1, /*tagged*/ 1, devq);
1968 if (sc->sim == NULL) {
1969 cam_simq_free(devq);
1974 if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS)
1977 if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim),
1978 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1979 xpt_bus_deregister(cam_sim_path(sc->sim));
/* Bind the address window that sbp_targ_recv() will service. */
1984 sc->fwb.start = SBP_TARG_BIND_START;
1985 sc->fwb.end = SBP_TARG_BIND_END;
1987 /* pre-allocate xfer */
1988 STAILQ_INIT(&sc->fwb.xferlist);
1989 fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG,
1990 /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */,
1991 fc, (void *)sc, sbp_targ_recv);
1992 fw_bindadd(fc, &sc->fwb);
1997 cam_sim_free(sc->sim, /*free_devq*/TRUE);
/*
 * sbp_targ_detach --
 *	Device detach: tear down the CAM SIM/path, free all per-LUN
 *	lstates and the black-hole lstate, remove the FireWire address
 *	binding and its xfers, and destroy the softc mutex.
 */
2002 sbp_targ_detach(device_t dev)
2004 struct sbp_targ_softc *sc;
2005 struct sbp_targ_lstate *lstate;
2008 sc = (struct sbp_targ_softc *)device_get_softc(dev);
/* Stop bus-reset callbacks first so nothing runs during teardown. */
2009 sc->fd.post_busreset = NULL;
2012 xpt_free_path(sc->path);
2013 xpt_bus_deregister(cam_sim_path(sc->sim));
2014 cam_sim_free(sc->sim, /*free_devq*/TRUE);
2017 for (i = 0; i < MAX_LUN; i++) {
2018 lstate = sc->lstate[i];
2019 if (lstate != NULL) {
2020 xpt_free_path(lstate->path);
2021 free(lstate, M_SBP_TARG);
2024 if (sc->black_hole != NULL) {
2025 xpt_free_path(sc->black_hole->path);
2026 free(sc->black_hole, M_SBP_TARG);
/* Undo the fw_bindadd/fw_xferlist_add done in attach. */
2029 fw_bindremove(sc->fd.fc, &sc->fwb);
2030 fw_xferlist_remove(&sc->fwb.xferlist);
2032 mtx_destroy(&sc->mtx);
/* newbus glue: device class, method table, driver, and module registration. */
2037 static devclass_t sbp_targ_devclass;
2039 static device_method_t sbp_targ_methods[] = {
2040 /* device interface */
2041 DEVMETHOD(device_identify, sbp_targ_identify),
2042 DEVMETHOD(device_probe, sbp_targ_probe),
2043 DEVMETHOD(device_attach, sbp_targ_attach),
2044 DEVMETHOD(device_detach, sbp_targ_detach),
2048 static driver_t sbp_targ_driver = {
2051 sizeof(struct sbp_targ_softc),
/* Attach as a child of the firewire bus; depends on firewire and cam. */
2054 DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0);
2055 MODULE_VERSION(sbp_targ, 1);
2056 MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1);
2057 MODULE_DEPEND(sbp_targ, cam, 1, 1, 1);