2 * SPDX-License-Identifier: BSD-4-Clause
5 * Hidetoshi Shimokawa. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
18 * This product includes software developed by Hidetoshi Shimokawa.
20 * 4. Neither the name of the author nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #include <sys/param.h>
40 #include <sys/kernel.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/types.h>
45 #include <sys/malloc.h>
46 #include <sys/endian.h>
49 #include <machine/bus.h>
51 #include <dev/firewire/firewire.h>
52 #include <dev/firewire/firewirereg.h>
53 #include <dev/firewire/iec13213.h>
54 #include <dev/firewire/sbp.h>
55 #include <dev/firewire/fwmem.h>
58 #include <cam/cam_ccb.h>
59 #include <cam/cam_sim.h>
60 #include <cam/cam_xpt_sim.h>
61 #include <cam/cam_debug.h>
62 #include <cam/cam_periph.h>
63 #include <cam/scsi/scsi_all.h>
64 #include <cam/scsi/scsi_message.h>
/*
 * SBP-2 target-mode constants: management/command agent register layout
 * within the 0xffff f000 0000 CSR address space, per-login bind-address
 * macros, and state flag bits shared by the login and softc structures.
 * NOTE(review): several original lines are elided in this listing, so
 * some macro bodies below are visibly incomplete.
 */
66 #define SBP_TARG_RECV_LEN 8
67 #define MAX_INITIATORS 8
72 * management/command block agent registers
74 * BASE 0xffff f001 0000 management port
75 * BASE 0xffff f001 0020 command port for login id 0
76 * BASE 0xffff f001 0040 command port for login id 1
79 #define SBP_TARG_MGM 0x10000 /* offset from 0xffff f000 000 */
80 #define SBP_TARG_BIND_HI 0xffff
81 #define SBP_TARG_BIND_LO(l) (0xf0000000 + SBP_TARG_MGM + 0x20 * ((l) + 1))
82 #define SBP_TARG_BIND_START (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
84 #define SBP_TARG_BIND_END (((u_int64_t)SBP_TARG_BIND_HI << 32) | \
85 SBP_TARG_BIND_LO(MAX_LOGINS))
86 #define SBP_TARG_LOGIN_ID(lo) (((lo) - SBP_TARG_BIND_LO(0))/0x20)
90 #define FETCH_POINTER 2
92 #define F_LINK_ACTIVE (1 << 0)
93 #define F_ATIO_STARVED (1 << 1)
94 #define F_LOGIN (1 << 2)
95 #define F_HOLD (1 << 3)
96 #define F_FREEZED (1 << 4)
/*
 * Malloc tag for all driver allocations and the "debug.sbp_targ_debug"
 * sysctl knob that gates the verbose printf tracing throughout the file.
 */
98 static MALLOC_DEFINE(M_SBP_TARG, "sbp_targ", "SBP-II/FireWire target mode");
100 static int debug = 0;
102 SYSCTL_INT(_debug, OID_AUTO, sbp_targ_debug, CTLFLAG_RW, &debug, 0,
103 "SBP target mode debug flag");
/*
 * Core driver state (several members elided in this listing):
 *  - sbp_targ_login:  one active initiator login — back-pointer to its LUN
 *    state, the initiator's fw_device, login response, queued ORBs, and the
 *    reconnection-hold callout.
 *  - sbp_targ_lstate: per-LUN state — CAM path, pending ATIO/INOT CCB
 *    lists, config-ROM model chunk, and the list of logins on this LUN.
 *  - sbp_targ_softc:  per-device softc — FireWire attachment, config-ROM
 *    unit chunk, LUN table, wildcard ("black hole") LUN, and login table.
 *  - unrestricted_page_table_fmt: on-the-wire SBP-2 page-table element.
 *  - orb_info: one in-flight ORB with its ORBI_STATUS_* state machine
 *    (fetch -> ATIO -> CTIO -> status) and page-table bookkeeping.
 */
105 struct sbp_targ_login {
106 struct sbp_targ_lstate *lstate;
107 struct fw_device *fwdev;
108 struct sbp_login_res loginres;
113 STAILQ_HEAD(, orb_info) orbs;
114 STAILQ_ENTRY(sbp_targ_login) link;
119 struct callout hold_callout;
122 struct sbp_targ_lstate {
124 struct sbp_targ_softc *sc;
125 struct cam_path *path;
126 struct ccb_hdr_slist accept_tios;
127 struct ccb_hdr_slist immed_notifies;
128 struct crom_chunk model;
130 STAILQ_HEAD(, sbp_targ_login) logins;
133 struct sbp_targ_softc {
134 struct firewire_dev_comm fd;
136 struct cam_path *path;
140 struct crom_chunk unit;
141 struct sbp_targ_lstate *lstate[MAX_LUN];
142 struct sbp_targ_lstate *black_hole;
143 struct sbp_targ_login *logins[MAX_LOGINS];
/* Softc lock; "_locked" helpers below assume it is already held. */
146 #define SBP_LOCK(sc) mtx_lock(&(sc)->mtx)
147 #define SBP_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
150 #if BYTE_ORDER == BIG_ENDIAN
157 page_table_present:1,
161 uint32_t data_size:16,
163 page_table_present:1,
174 #if BYTE_ORDER == BIG_ENDIAN
191 * Unrestricted page table format
192 * states that the segment length
193 * and high base addr are in the first
194 * 32 bits and the base low is in
197 struct unrestricted_page_table_fmt {
198 uint16_t segment_len;
199 uint16_t segment_base_high;
200 uint32_t segment_base_low;
205 struct sbp_targ_softc *sc;
206 struct fw_device *fwdev;
207 struct sbp_targ_login *login;
209 struct ccb_accept_tio *atio;
/* orb_info life-cycle states; ABORTED may be entered from any state. */
211 #define ORBI_STATUS_NONE 0
212 #define ORBI_STATUS_FETCH 1
213 #define ORBI_STATUS_ATIO 2
214 #define ORBI_STATUS_CTIO 3
215 #define ORBI_STATUS_STATUS 4
216 #define ORBI_STATUS_POINTER 5
217 #define ORBI_STATUS_ABORTED 7
224 STAILQ_ENTRY(orb_info) link;
226 struct unrestricted_page_table_fmt *page_table;
227 struct unrestricted_page_table_fmt *cur_pte;
228 struct unrestricted_page_table_fmt *last_pte;
229 uint32_t last_block_read;
230 struct sbp_status status;
/* Printable names for ORB management functions (initializer elided here). */
233 static char *orb_fun_name[] = {
/* Forward declarations for handlers referenced before their definitions. */
237 static void sbp_targ_recv(struct fw_xfer *);
238 static void sbp_targ_fetch_orb(struct sbp_targ_softc *, struct fw_device *,
239 uint16_t, uint32_t, struct sbp_targ_login *, int);
240 static void sbp_targ_xfer_pt(struct orb_info *);
241 static void sbp_targ_abort(struct sbp_targ_softc *, struct orb_info *);
/*
 * Bus identify hook: add one sbp_targ child per FireWire bus, numbered
 * after the parent unit.
 */
244 sbp_targ_identify(driver_t *driver, device_t parent)
246 BUS_ADD_CHILD(parent, 0, "sbp_targ", device_get_unit(parent));
/*
 * Probe: accept only the child whose unit matches the parent's unit,
 * then set the device description.
 */
250 sbp_targ_probe(device_t dev)
254 pa = device_get_parent(dev);
255 if (device_get_unit(dev) != device_get_unit(pa)) {
259 device_set_desc(dev, "SBP-2/SCSI over FireWire target mode");
/*
 * Tear down a login: free every queued ORB, stop the reconnection-hold
 * callout, unlink the login from its lstate list and the softc login
 * table, and finally free the login itself.
 */
264 sbp_targ_dealloc_login(struct sbp_targ_login *login)
266 struct orb_info *orbi, *next;
269 printf("%s: login = NULL\n", __func__);
/* Save 'next' before freeing so the traversal survives the free(). */
272 for (orbi = STAILQ_FIRST(&login->orbs); orbi != NULL; orbi = next) {
273 next = STAILQ_NEXT(orbi, link);
275 printf("%s: free orbi %p\n", __func__, orbi);
276 free(orbi, M_SBP_TARG);
279 callout_stop(&login->hold_callout);
281 STAILQ_REMOVE(&login->lstate->logins, login, sbp_targ_login, link);
282 login->lstate->sc->logins[login->id] = NULL;
284 printf("%s: free login %p\n", __func__, login);
285 free((void *)login, M_SBP_TARG);
/*
 * Callout handler armed at bus reset: if the initiator has not
 * reconnected while F_HOLD is set, drop the login entirely.
 */
290 sbp_targ_hold_expire(void *arg)
292 struct sbp_targ_login *login;
294 login = (struct sbp_targ_login *)arg;
296 if (login->flags & F_HOLD) {
297 printf("%s: login_id=%d expired\n", __func__, login->id);
298 sbp_targ_dealloc_login(login);
300 printf("%s: login_id=%d not hold\n", __func__, login->id);
/*
 * Bus-reset callback: freeze the SIM queue, rebuild the SBP-2 unit
 * directory in the config ROM (one LUN entry per enabled lstate), abort
 * all in-flight ORBs, and start the reconnection-hold timer for every
 * logged-in initiator.
 */
305 sbp_targ_post_busreset(void *arg)
307 struct sbp_targ_softc *sc;
308 struct crom_src *src;
309 struct crom_chunk *root;
310 struct crom_chunk *unit;
311 struct sbp_targ_lstate *lstate;
312 struct sbp_targ_login *login;
315 sc = (struct sbp_targ_softc *)arg;
316 src = sc->fd.fc->crom_src;
317 root = sc->fd.fc->crom_root;
/* Freeze once per reset; post_explore releases it later. */
321 if ((sc->flags & F_FREEZED) == 0) {
322 sc->flags |= F_FREEZED;
323 xpt_freeze_simq(sc->sim, /*count*/1);
325 printf("%s: already freezed\n", __func__);
/* Rebuild the SBP-2 unit directory from scratch. */
328 bzero(unit, sizeof(struct crom_chunk));
330 crom_add_chunk(src, root, unit, CROM_UDIR);
331 crom_add_entry(unit, CSRKEY_SPEC, CSRVAL_ANSIT10);
332 crom_add_entry(unit, CSRKEY_VER, CSRVAL_T10SBP2);
333 crom_add_entry(unit, CSRKEY_COM_SPEC, CSRVAL_ANSIT10);
334 crom_add_entry(unit, CSRKEY_COM_SET, CSRVAL_SCSI);
336 crom_add_entry(unit, CROM_MGM, SBP_TARG_MGM >> 2);
337 crom_add_entry(unit, CSRKEY_UNIT_CH, (10<<8) | 8);
339 for (i = 0; i < MAX_LUN; i++) {
340 lstate = sc->lstate[i];
343 crom_add_entry(unit, CSRKEY_FIRM_VER, 1);
344 crom_add_entry(unit, CROM_LUN, i);
345 crom_add_entry(unit, CSRKEY_MODEL, 1);
346 crom_add_simple_text(src, unit, &lstate->model, "TargetMode");
349 /* Process for reconnection hold time */
350 for (i = 0; i < MAX_LOGINS; i++) {
351 login = sc->logins[i];
354 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
355 if (login->flags & F_LOGIN) {
356 login->flags |= F_HOLD;
357 callout_reset(&login->hold_callout,
358 hz * login->hold_sec,
359 sbp_targ_hold_expire, (void *)login);
/*
 * Post-explore callback: bus topology is settled, so drop the freeze
 * taken in sbp_targ_post_busreset and let queued CCBs run again.
 */
365 sbp_targ_post_explore(void *arg)
367 struct sbp_targ_softc *sc;
369 sc = (struct sbp_targ_softc *)arg;
370 sc->flags &= ~F_FREEZED;
371 xpt_release_simq(sc->sim, /*run queue*/TRUE);
/*
 * Resolve the lstate for a CCB: wildcard target+LUN maps to the
 * "black hole" lstate, otherwise look up by LUN.  Returns CAM status;
 * with notfound_failure set, a missing lstate is CAM_PATH_INVALID.
 */
376 sbp_targ_find_devs(struct sbp_targ_softc *sc, union ccb *ccb,
377 struct sbp_targ_lstate **lstate, int notfound_failure)
381 /* XXX 0 is the only valid target_id */
382 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD &&
383 ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
384 *lstate = sc->black_hole;
386 printf("setting black hole for this target id(%d)\n", ccb->ccb_h.target_id);
387 return (CAM_REQ_CMP);
390 lun = ccb->ccb_h.target_lun;
392 return (CAM_LUN_INVALID);
394 *lstate = sc->lstate[lun];
396 if (notfound_failure != 0 && *lstate == NULL) {
398 printf("%s: lstate for lun is invalid, target(%d), lun(%d)\n",
399 __func__, ccb->ccb_h.target_id, lun);
400 return (CAM_PATH_INVALID);
403 printf("%s: setting lstate for tgt(%d) lun(%d)\n",
404 __func__,ccb->ccb_h.target_id, lun);
406 return (CAM_REQ_CMP);
/*
 * XPT_EN_LUN handler: enable or disable target mode on a LUN.
 * Enable allocates and wires up a new lstate (or the wildcard black
 * hole) and creates its CAM path; disable requires no pending ATIOs or
 * INOTs, then deallocates every login and frees the lstate.  Both paths
 * trigger a bus reset (ibr) so initiators re-read the config ROM.
 */
410 sbp_targ_en_lun(struct sbp_targ_softc *sc, union ccb *ccb)
412 struct ccb_en_lun *cel = &ccb->cel;
413 struct sbp_targ_lstate *lstate;
416 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
417 if (status != CAM_REQ_CMP) {
418 ccb->ccb_h.status = status;
/* --- enable path --- */
422 if (cel->enable != 0) {
423 if (lstate != NULL) {
424 xpt_print_path(ccb->ccb_h.path);
425 printf("Lun already enabled\n");
426 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
429 if (cel->grp6_len != 0 || cel->grp7_len != 0) {
430 ccb->ccb_h.status = CAM_REQ_INVALID;
431 printf("Non-zero Group Codes\n");
434 lstate = (struct sbp_targ_lstate *)
435 malloc(sizeof(*lstate), M_SBP_TARG, M_NOWAIT | M_ZERO);
436 if (lstate == NULL) {
437 xpt_print_path(ccb->ccb_h.path);
438 printf("Couldn't allocate lstate\n");
439 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
443 printf("%s: malloc'd lstate %p\n",__func__, lstate);
445 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD) {
446 sc->black_hole = lstate;
448 printf("Blackhole set due to target id == %d\n",
449 ccb->ccb_h.target_id);
451 sc->lstate[ccb->ccb_h.target_lun] = lstate;
/* NOTE(review): redundant with M_ZERO above — lstate is already zeroed. */
453 memset(lstate, 0, sizeof(*lstate));
455 status = xpt_create_path(&lstate->path, /*periph*/NULL,
456 xpt_path_path_id(ccb->ccb_h.path),
457 xpt_path_target_id(ccb->ccb_h.path),
458 xpt_path_lun_id(ccb->ccb_h.path));
459 if (status != CAM_REQ_CMP) {
460 free(lstate, M_SBP_TARG);
462 xpt_print_path(ccb->ccb_h.path);
463 printf("Couldn't allocate path\n");
464 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
467 SLIST_INIT(&lstate->accept_tios);
468 SLIST_INIT(&lstate->immed_notifies);
469 STAILQ_INIT(&lstate->logins);
471 ccb->ccb_h.status = CAM_REQ_CMP;
472 xpt_print_path(ccb->ccb_h.path);
473 printf("Lun now enabled for target mode\n");
/* Force a bus reset so initiators notice the new unit directory. */
475 sc->fd.fc->ibr(sc->fd.fc);
/* --- disable path --- */
477 struct sbp_targ_login *login, *next;
479 if (lstate == NULL) {
480 ccb->ccb_h.status = CAM_LUN_INVALID;
481 printf("Invalid lstate for this target\n");
484 ccb->ccb_h.status = CAM_REQ_CMP;
/* Refuse to disable while CCBs are still queued on this LUN. */
486 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
487 printf("ATIOs pending\n");
488 ccb->ccb_h.status = CAM_REQ_INVALID;
491 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
492 printf("INOTs pending\n");
493 ccb->ccb_h.status = CAM_REQ_INVALID;
496 if (ccb->ccb_h.status != CAM_REQ_CMP) {
497 printf("status != CAM_REQ_CMP\n");
501 xpt_print_path(ccb->ccb_h.path);
502 printf("Target mode disabled\n");
503 xpt_free_path(lstate->path);
505 for (login = STAILQ_FIRST(&lstate->logins); login != NULL;
507 next = STAILQ_NEXT(login, link);
508 sbp_targ_dealloc_login(login);
511 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD)
512 sc->black_hole = NULL;
514 sc->lstate[ccb->ccb_h.target_lun] = NULL;
516 printf("%s: free lstate %p\n", __func__, lstate);
517 free(lstate, M_SBP_TARG);
521 sc->fd.fc->ibr(sc->fd.fc);
/*
 * Placeholder for delivering queued immediate-notify events to CAM;
 * currently only logs that it is unimplemented.
 */
526 sbp_targ_send_lstate_events(struct sbp_targ_softc *sc,
527 struct sbp_targ_lstate *lstate)
530 struct ccb_hdr *ccbh;
531 struct ccb_immediate_notify *inot;
533 printf("%s: not implemented yet\n", __func__);
/*
 * Unlink an ORB from its login's queue.  Caller must already hold the
 * softc lock (see SBP_LOCK); the ORB itself is not freed here.
 */
539 sbp_targ_remove_orb_info_locked(struct sbp_targ_login *login, struct orb_info *orbi)
541 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
/*
 * Locking variant of the above: unlinks the ORB under the softc lock
 * and releases it afterwards (the SBP_LOCK acquisition is on an elided
 * line of this listing).
 */
545 sbp_targ_remove_orb_info(struct sbp_targ_login *login, struct orb_info *orbi)
548 STAILQ_REMOVE(&login->orbs, orbi, orb_info, link);
549 SBP_UNLOCK(orbi->sc);
553 * tag_id/init_id encoding
555 * tag_id and init_id has only 32bit for each.
556 * scsi_target can handle very limited number(up to 15) of init_id.
557 * we have to encode 48bit orb and 64bit EUI64 into these
560 * tag_id represents lower 32bit of ORB address.
561 * init_id represents login_id.
/*
 * Find the in-flight orb_info whose ORB low address matches tag_id on
 * the login identified by init_id; logs and falls through (returning
 * NULL on an elided line, presumably) when not found.
 */
565 static struct orb_info *
566 sbp_targ_get_orb_info(struct sbp_targ_lstate *lstate,
567 u_int tag_id, u_int init_id)
569 struct sbp_targ_login *login;
570 struct orb_info *orbi;
572 login = lstate->sc->logins[init_id];
574 printf("%s: no such login\n", __func__);
577 STAILQ_FOREACH(orbi, &login->orbs, link)
578 if (orbi->orb_lo == tag_id)
580 printf("%s: orb not found tag_id=0x%08x init_id=%d\n",
581 __func__, tag_id, init_id);
/*
 * Abort the given ORB and every ORB after it on its list.  ORBs that
 * have not progressed past ATIO are unlinked and freed immediately;
 * later-stage ORBs are only marked ABORTED so their completion
 * callbacks can finish the cleanup.  Any attached CCB is completed
 * with CAM_REQ_ABORTED.
 */
588 sbp_targ_abort(struct sbp_targ_softc *sc, struct orb_info *orbi)
590 struct orb_info *norbi;
593 for (; orbi != NULL; orbi = norbi) {
594 printf("%s: status=%d ccb=%p\n", __func__, orbi->state, orbi->ccb);
595 norbi = STAILQ_NEXT(orbi, link);
596 if (orbi->state != ORBI_STATUS_ABORTED) {
597 if (orbi->ccb != NULL) {
598 orbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
/* Up to ATIO no transfer is outstanding, so the ORB can be freed now. */
602 if (orbi->state <= ORBI_STATUS_ATIO) {
603 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
605 printf("%s: free orbi %p\n", __func__, orbi);
606 free(orbi, M_SBP_TARG);
609 orbi->state = ORBI_STATUS_ABORTED;
/*
 * fw_xfer completion callback that just releases the orb_info (and its
 * page table, if one was allocated); used when no further processing
 * of the transfer result is needed.
 */
616 sbp_targ_free_orbi(struct fw_xfer *xfer)
618 struct orb_info *orbi;
620 if (xfer->resp != 0) {
622 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
624 orbi = (struct orb_info *)xfer->sc;
625 if ( orbi->page_table != NULL ) {
627 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
628 free(orbi->page_table, M_SBP_TARG);
629 orbi->page_table = NULL;
632 printf("%s: free orbi %p\n", __func__, orbi);
633 free(orbi, M_SBP_TARG);
/*
 * Write the ORB's status block to the initiator's status FIFO address
 * via an async block write; optionally dequeue the ORB from its login
 * first.  The status length is (status.len + 1) quadlets.
 */
639 sbp_targ_status_FIFO(struct orb_info *orbi,
640 uint32_t fifo_hi, uint32_t fifo_lo, int dequeue)
642 struct fw_xfer *xfer;
645 sbp_targ_remove_orb_info(orbi->login, orbi);
647 xfer = fwmem_write_block(orbi->fwdev, (void *)orbi,
648 /*spd*/FWSPD_S400, fifo_hi, fifo_lo,
649 sizeof(uint32_t) * (orbi->status.len + 1), (char *)&orbi->status,
654 printf("%s: xfer == NULL\n", __func__);
659 * Generate the appropriate CAM status for the
/*
 * Build the SBP-2 status block for a completed CCB and push it to the
 * initiator's status FIFO.  For CHECK CONDITION / BUSY / TERMINATED
 * the sense data is repackaged into an sbp_cmd_status record
 * (sense key, ASC/ASCQ, info, FRU, stream bits, sense-key-dependent
 * bytes) per the SBP-2 status block format.
 */
663 sbp_targ_send_status(struct orb_info *orbi, union ccb *ccb)
665 struct sbp_status *sbp_status;
667 struct orb_info *norbi;
670 sbp_status = &orbi->status;
672 orbi->state = ORBI_STATUS_STATUS;
674 sbp_status->resp = 0; /* XXX */
675 sbp_status->status = 0; /* XXX */
676 sbp_status->dead = 0; /* XXX */
678 ccb->ccb_h.status= CAM_REQ_CMP;
680 switch (ccb->csio.scsi_status) {
683 printf("%s: STATUS_OK\n", __func__);
686 case SCSI_STATUS_CHECK_COND:
688 printf("%s: STATUS SCSI_STATUS_CHECK_COND\n", __func__);
689 goto process_scsi_status;
690 case SCSI_STATUS_BUSY:
692 printf("%s: STATUS SCSI_STATUS_BUSY\n", __func__);
693 goto process_scsi_status;
694 case SCSI_STATUS_CMD_TERMINATED:
697 struct sbp_cmd_status *sbp_cmd_status;
698 struct scsi_sense_data *sense;
699 int error_code, sense_key, asc, ascq;
706 sbp_cmd_status = (struct sbp_cmd_status *)&sbp_status->data[0];
707 sbp_cmd_status->status = ccb->csio.scsi_status;
708 sense = &ccb->csio.sense_data;
710 #if 0 /* XXX What we should do? */
712 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
714 norbi = STAILQ_NEXT(orbi, link);
716 printf("%s: status=%d\n", __func__, norbi->state);
717 if (norbi->ccb != NULL) {
718 norbi->ccb->ccb_h.status = CAM_REQ_ABORTED;
719 xpt_done(norbi->ccb);
722 sbp_targ_remove_orb_info_locked(orbi->login, norbi);
/*
 * NOTE(review): in this dead (#if 0) code, norbi is advanced before
 * free(), so the wrong node would be freed — do not resurrect as-is.
 */
723 norbi = STAILQ_NEXT(norbi, link);
724 free(norbi, M_SBP_TARG);
/* Extract sense fields in a format-independent way (fixed or descriptor). */
729 sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
730 scsi_extract_sense_len(sense, sense_len, &error_code,
731 &sense_key, &asc, &ascq, /*show_errors*/ 0);
733 switch (error_code) {
734 case SSD_CURRENT_ERROR:
735 case SSD_DESC_CURRENT_ERROR:
736 sbp_cmd_status->sfmt = SBP_SFMT_CURR;
739 sbp_cmd_status->sfmt = SBP_SFMT_DEFER;
743 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info,
746 sbp_cmd_status->valid = 1;
749 sbp_cmd_status->info = htobe32(info_trunc);
751 sbp_cmd_status->valid = 0;
754 sbp_cmd_status->s_key = sense_key;
756 if (scsi_get_stream_info(sense, sense_len, NULL,
757 &stream_bits) == 0) {
758 sbp_cmd_status->mark =
759 (stream_bits & SSD_FILEMARK) ? 1 : 0;
760 sbp_cmd_status->eom =
761 (stream_bits & SSD_EOM) ? 1 : 0;
762 sbp_cmd_status->ill_len =
763 (stream_bits & SSD_ILI) ? 1 : 0;
765 sbp_cmd_status->mark = 0;
766 sbp_cmd_status->eom = 0;
767 sbp_cmd_status->ill_len = 0;
771 /* add_sense_code(_qual), info, cmd_spec_info */
774 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND,
775 &info, &sinfo) == 0) {
776 uint32_t cmdspec_trunc;
778 cmdspec_trunc = info;
780 sbp_cmd_status->cdb = htobe32(cmdspec_trunc);
783 sbp_cmd_status->s_code = asc;
784 sbp_cmd_status->s_qlfr = ascq;
786 if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &info,
788 sbp_cmd_status->fru = (uint8_t)info;
791 sbp_cmd_status->fru = 0;
794 if (scsi_get_sks(sense, sense_len, sks) == 0) {
795 bcopy(sks, &sbp_cmd_status->s_keydep[0], sizeof(sks));
797 ccb->ccb_h.status |= CAM_SENT_SENSE;
803 printf("%s: unknown scsi status 0x%x\n", __func__,
808 sbp_targ_status_FIFO(orbi,
809 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
813 * Invoked as a callback handler from fwmem_read/write_block
815 * Process read/write of initiator address space
816 * completion and pass status onto the backend target.
817 * If this is a partial read/write for a CCB then
818 * we decrement the orbi's refcount to indicate
819 * the status of the read/write is complete
822 sbp_targ_cam_done(struct fw_xfer *xfer)
824 struct orb_info *orbi;
827 orbi = (struct orb_info *)xfer->sc;
830 printf("%s: resp=%d refcount=%d\n", __func__,
831 xfer->resp, orbi->refcount);
/* A failed transfer poisons the status block and aborts the rest of the queue. */
833 if (xfer->resp != 0) {
834 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
835 orbi->status.resp = SBP_TRANS_FAIL;
836 orbi->status.status = OBJ_DATA | SBE_TIMEOUT/*XXX*/;
837 orbi->status.dead = 1;
838 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
/* Last outstanding sub-transfer for this ORB has completed. */
844 if (orbi->refcount == 0) {
846 if (orbi->state == ORBI_STATUS_ABORTED) {
848 printf("%s: orbi aborted\n", __func__);
849 sbp_targ_remove_orb_info(orbi->login, orbi);
850 if (orbi->page_table != NULL) {
852 printf("%s: free orbi->page_table %p\n",
853 __func__, orbi->page_table);
854 free(orbi->page_table, M_SBP_TARG);
857 printf("%s: free orbi %p\n", __func__, orbi);
858 free(orbi, M_SBP_TARG);
/*
 * NOTE(review): this compares status.resp against ORBI_STATUS_NONE —
 * mixing the SBP response field with the driver's state constants;
 * it works only because both "no error" values are 0.
 */
860 } else if (orbi->status.resp == ORBI_STATUS_NONE) {
861 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
863 printf("%s: CAM_SEND_STATUS set %0x\n", __func__, ccb->ccb_h.flags);
864 sbp_targ_send_status(orbi, ccb);
867 printf("%s: CAM_SEND_STATUS not set %0x\n", __func__, ccb->ccb_h.flags);
868 ccb->ccb_h.status = CAM_REQ_CMP;
872 orbi->status.len = 1;
873 sbp_targ_status_FIFO(orbi,
874 orbi->login->fifo_hi, orbi->login->fifo_lo,
876 ccb->ccb_h.status = CAM_REQ_ABORTED;
/*
 * Abort a queued ATIO or INOT CCB: find it on the lstate's accept_tios
 * or immed_notifies SLIST, unlink it, and complete it with
 * CAM_REQ_ABORTED.  Returns CAM_REQ_CMP on success, CAM_UA_ABORT for
 * unsupported CCB types, CAM_PATH_INVALID if the CCB is not queued.
 */
885 sbp_targ_abort_ccb(struct sbp_targ_softc *sc, union ccb *ccb)
888 struct sbp_targ_lstate *lstate;
889 struct ccb_hdr_slist *list;
890 struct ccb_hdr *curelm;
894 status = sbp_targ_find_devs(sc, ccb, &lstate, 0);
895 if (status != CAM_REQ_CMP)
898 accb = ccb->cab.abort_ccb;
900 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
901 list = &lstate->accept_tios;
902 else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
903 list = &lstate->immed_notifies;
905 return (CAM_UA_ABORT);
/* Manual SLIST search-and-unlink: head first, then interior elements. */
907 curelm = SLIST_FIRST(list);
909 if (curelm == &accb->ccb_h) {
911 SLIST_REMOVE_HEAD(list, sim_links.sle);
913 while (curelm != NULL) {
914 struct ccb_hdr *nextelm;
916 nextelm = SLIST_NEXT(curelm, sim_links.sle);
917 if (nextelm == &accb->ccb_h) {
919 SLIST_NEXT(curelm, sim_links.sle) =
920 SLIST_NEXT(nextelm, sim_links.sle);
927 accb->ccb_h.status = CAM_REQ_ABORTED;
929 return (CAM_REQ_CMP);
931 printf("%s: not found\n", __func__);
932 return (CAM_PATH_INVALID);
936 * directly execute a read or write to the initiator
937 * address space and set hand(sbp_targ_cam_done) to
938 * process the completion from the SIM to the target.
939 * set orbi->refcount to indicate that a read/write
940 * is inflight to/from the initiator.
943 sbp_targ_xfer_buf(struct orb_info *orbi, u_int offset,
944 uint16_t dst_hi, uint32_t dst_lo, u_int size,
945 void (*hand)(struct fw_xfer *))
947 struct fw_xfer *xfer;
948 u_int len, ccb_dir, off = 0;
952 printf("%s: offset=%d size=%d\n", __func__, offset, size);
953 ccb_dir = orbi->ccb->ccb_h.flags & CAM_DIR_MASK;
954 ptr = (char *)orbi->ccb->csio.data_ptr + offset;
/* Split the transfer into chunks; 2048 is an arbitrary cap (see XXX). */
957 /* XXX assume dst_lo + off doesn't overflow */
958 len = MIN(size, 2048 /* XXX */);
/* CAM_DIR_OUT = target receives data, so read from initiator memory. */
961 if (ccb_dir == CAM_DIR_OUT) {
963 printf("%s: CAM_DIR_OUT --> read block in?\n",__func__);
964 xfer = fwmem_read_block(orbi->fwdev,
965 (void *)orbi, /*spd*/FWSPD_S400,
966 dst_hi, dst_lo + off, len,
970 printf("%s: CAM_DIR_IN --> write block out?\n",__func__);
971 xfer = fwmem_write_block(orbi->fwdev,
972 (void *)orbi, /*spd*/FWSPD_S400,
973 dst_hi, dst_lo + off, len,
977 printf("%s: xfer == NULL", __func__);
978 /* XXX what should we do?? */
/*
 * Completion callback for the page-table fetch issued by
 * sbp_targ_fetch_pt: handle abort/error cases, byte-swap every
 * page-table entry to host order, then kick off the actual data
 * transfers with sbp_targ_xfer_pt.
 */
986 sbp_targ_pt_done(struct fw_xfer *xfer)
988 struct orb_info *orbi;
989 struct unrestricted_page_table_fmt *pt;
992 orbi = (struct orb_info *)xfer->sc;
994 if (orbi->state == ORBI_STATUS_ABORTED) {
996 printf("%s: orbi aborted\n", __func__);
997 sbp_targ_remove_orb_info(orbi->login, orbi);
999 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
1000 printf("%s: free orbi %p\n", __func__, orbi);
1002 free(orbi->page_table, M_SBP_TARG);
1003 free(orbi, M_SBP_TARG);
/* Fetch failed: report transport failure and abort the rest of the queue. */
1008 if (xfer->resp != 0) {
1009 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1010 orbi->status.resp = SBP_TRANS_FAIL;
1011 orbi->status.status = OBJ_PT | SBE_TIMEOUT/*XXX*/;
1012 orbi->status.dead = 1;
1013 orbi->status.len = 1;
1014 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1017 printf("%s: free orbi->page_table %p\n", __func__, orbi->page_table);
1019 sbp_targ_status_FIFO(orbi,
1020 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1021 free(orbi->page_table, M_SBP_TARG);
1022 orbi->page_table = NULL;
1028 * Set endianness here so we don't have
1029 * to deal with it later
1031 for (i = 0, pt = orbi->page_table; i < orbi->orb4.data_size; i++, pt++) {
1032 pt->segment_len = ntohs(pt->segment_len);
1034 printf("%s:segment_len = %u\n", __func__,pt->segment_len);
1035 pt->segment_base_high = ntohs(pt->segment_base_high);
1036 pt->segment_base_low = ntohl(pt->segment_base_low);
1039 sbp_targ_xfer_pt(orbi);
1042 if (orbi->refcount == 0)
1043 printf("%s: refcount == 0\n", __func__);
/*
 * Walk the (host-order) unrestricted page table and issue one
 * sbp_targ_xfer_buf per segment until the CCB's dxfer_len is consumed.
 * cur_pte persists across calls so a CCB split over multiple CTIOs
 * resumes where the previous pass stopped; partially-consumed entries
 * have their base advanced (with 4GB-boundary carry) and length reduced.
 */
1049 static void sbp_targ_xfer_pt(struct orb_info *orbi)
1052 uint32_t res, offset, len;
1056 printf("%s: dxfer_len=%d\n", __func__, ccb->csio.dxfer_len);
1057 res = ccb->csio.dxfer_len;
1059 * If the page table required multiple CTIO's to
1060 * complete, then cur_pte is non NULL
1061 * and we need to start from the last position
1062 * If this is the first pass over a page table
1063 * then we just start at the beginning of the page
1066 * Parse the unrestricted page table and figure out where we need
1067 * to shove the data from this read request.
1069 for (offset = 0, len = 0; (res != 0) && (orbi->cur_pte < orbi->last_pte); offset += len) {
1070 len = MIN(orbi->cur_pte->segment_len, res);
1073 printf("%s:page_table: %04x:%08x segment_len(%u) res(%u) len(%u)\n",
1074 __func__, orbi->cur_pte->segment_base_high,
1075 orbi->cur_pte->segment_base_low,
1076 orbi->cur_pte->segment_len,
1078 sbp_targ_xfer_buf(orbi, offset,
1079 orbi->cur_pte->segment_base_high,
1080 orbi->cur_pte->segment_base_low,
1081 len, sbp_targ_cam_done);
1083 * If we have only written partially to
1084 * this page table, then we need to save
1085 * our position for the next CTIO. If we
1086 * have completed the page table, then we
1087 * are safe to move on to the next entry.
1089 if (len == orbi->cur_pte->segment_len) {
1092 uint32_t saved_base_low;
1094 /* Handle transfers that cross a 4GB boundary. */
1095 saved_base_low = orbi->cur_pte->segment_base_low;
1096 orbi->cur_pte->segment_base_low += len;
1097 if (orbi->cur_pte->segment_base_low < saved_base_low)
1098 orbi->cur_pte->segment_base_high++;
1100 orbi->cur_pte->segment_len -= len;
1104 printf("%s: base_low(%08x) page_table_off(%p) last_block(%u)\n",
1105 __func__, orbi->cur_pte->segment_base_low,
1106 orbi->cur_pte, orbi->last_block_read);
1109 printf("Warning - short pt encountered. "
1110 "Could not transfer all data.\n");
1115 * Create page table in local memory
1116 * and transfer it from the initiator
1117 * in order to know where we are supposed
1122 sbp_targ_fetch_pt(struct orb_info *orbi)
1124 struct fw_xfer *xfer;
1127 * Pull in page table from initiator
1128 * and setup for data from our
/*
 * First CTIO for this ORB: allocate a local copy of the page table
 * (orb4.data_size entries) and start an async read of it from the
 * initiator; sbp_targ_pt_done continues once it arrives.
 */
1131 if (orbi->page_table == NULL) {
1132 orbi->page_table = malloc(orbi->orb4.data_size*
1133 sizeof(struct unrestricted_page_table_fmt),
1134 M_SBP_TARG, M_NOWAIT|M_ZERO);
1135 if (orbi->page_table == NULL)
1137 orbi->cur_pte = orbi->page_table;
1138 orbi->last_pte = orbi->page_table + orbi->orb4.data_size;
1139 orbi->last_block_read = orbi->orb4.data_size;
1140 if (debug && orbi->page_table != NULL)
1141 printf("%s: malloc'd orbi->page_table(%p), orb4.data_size(%u)\n",
1142 __func__, orbi->page_table, orbi->orb4.data_size);
1144 xfer = fwmem_read_block(orbi->fwdev, (void *)orbi, /*spd*/FWSPD_S400,
1145 orbi->data_hi, orbi->data_lo, orbi->orb4.data_size*
1146 sizeof(struct unrestricted_page_table_fmt),
1147 (void *)orbi->page_table, sbp_targ_pt_done);
1153 * This is a CTIO for a page table we have
1154 * already malloc'd, so just directly invoke
1155 * the xfer function on the orbi.
1157 sbp_targ_xfer_pt(orbi);
/* Allocation or fetch setup failed: fail the CCB with RESRC_UNAVAIL. */
1161 orbi->ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1163 printf("%s: free orbi->page_table %p due to xfer == NULL\n", __func__, orbi->page_table);
1164 if (orbi->page_table != NULL) {
1165 free(orbi->page_table, M_SBP_TARG);
1166 orbi->page_table = NULL;
1168 xpt_done(orbi->ccb);
/*
 * Main CAM action dispatcher for the target-mode SIM.  Handles
 * XPT_CONT_TARGET_IO (move data to/from the initiator, directly or via
 * a fetched page table), queuing of ATIO/INOT resources, LUN enable,
 * path inquiry, CCB aborts, and transfer-settings queries.
 */
1173 sbp_targ_action1(struct cam_sim *sim, union ccb *ccb)
1175 struct sbp_targ_softc *sc;
1176 struct sbp_targ_lstate *lstate;
1180 sc = (struct sbp_targ_softc *)cam_sim_softc(sim);
1182 status = sbp_targ_find_devs(sc, ccb, &lstate, TRUE);
1184 switch (ccb->ccb_h.func_code) {
1185 case XPT_CONT_TARGET_IO:
1187 struct orb_info *orbi;
1190 printf("%s: XPT_CONT_TARGET_IO (0x%08x)\n",
1191 __func__, ccb->csio.tag_id);
1193 if (status != CAM_REQ_CMP) {
1194 ccb->ccb_h.status = status;
1198 /* XXX transfer from/to initiator */
1199 orbi = sbp_targ_get_orb_info(lstate,
1200 ccb->csio.tag_id, ccb->csio.init_id);
1202 ccb->ccb_h.status = CAM_REQ_ABORTED; /* XXX */
1206 if (orbi->state == ORBI_STATUS_ABORTED) {
1208 printf("%s: ctio aborted\n", __func__);
1209 sbp_targ_remove_orb_info_locked(orbi->login, orbi);
1211 printf("%s: free orbi %p\n", __func__, orbi);
1212 free(orbi, M_SBP_TARG);
1213 ccb->ccb_h.status = CAM_REQ_ABORTED;
1217 orbi->state = ORBI_STATUS_CTIO;
1220 ccb_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
/* Zero-length transfers are treated as CAM_DIR_NONE (status only). */
1223 if (ccb->csio.dxfer_len == 0)
1224 ccb_dir = CAM_DIR_NONE;
1227 if (ccb_dir == CAM_DIR_IN && orbi->orb4.dir == 0)
1228 printf("%s: direction mismatch\n", __func__);
1230 /* check page table */
1231 if (ccb_dir != CAM_DIR_NONE && orbi->orb4.page_table_present) {
1233 printf("%s: page_table_present\n",
1235 if (orbi->orb4.page_size != 0) {
1236 printf("%s: unsupported pagesize %d != 0\n",
1237 __func__, orbi->orb4.page_size);
1238 ccb->ccb_h.status = CAM_REQ_INVALID;
1242 sbp_targ_fetch_pt(orbi);
/* Direct (non-page-table) data movement straight from the ORB pointer. */
1247 if (ccb_dir != CAM_DIR_NONE) {
1248 sbp_targ_xfer_buf(orbi, 0, orbi->data_hi,
1250 MIN(orbi->orb4.data_size, ccb->csio.dxfer_len),
1252 if ( orbi->orb4.data_size > ccb->csio.dxfer_len ) {
1253 orbi->data_lo += ccb->csio.dxfer_len;
1254 orbi->orb4.data_size -= ccb->csio.dxfer_len;
1258 if (ccb_dir == CAM_DIR_NONE) {
1259 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1262 sbp_targ_send_status(orbi, ccb);
1265 ccb->ccb_h.status = CAM_REQ_CMP;
1270 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
1271 if (status != CAM_REQ_CMP) {
1272 ccb->ccb_h.status = status;
1276 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
1278 ccb->ccb_h.status = CAM_REQ_INPROG;
/* A starved login was waiting for an ATIO; resume its ORB fetch now. */
1279 if ((lstate->flags & F_ATIO_STARVED) != 0) {
1280 struct sbp_targ_login *login;
1283 printf("%s: new atio arrived\n", __func__);
1284 lstate->flags &= ~F_ATIO_STARVED;
1285 STAILQ_FOREACH(login, &lstate->logins, link)
1286 if ((login->flags & F_ATIO_STARVED) != 0) {
1287 login->flags &= ~F_ATIO_STARVED;
1288 sbp_targ_fetch_orb(lstate->sc,
1290 login->last_hi, login->last_lo,
1295 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */
1296 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
1297 if (status != CAM_REQ_CMP) {
1298 ccb->ccb_h.status = status;
1302 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
1304 ccb->ccb_h.status = CAM_REQ_INPROG;
1305 sbp_targ_send_lstate_events(sc, lstate);
1308 sbp_targ_en_lun(sc, ccb);
1313 struct ccb_pathinq *cpi = &ccb->cpi;
1315 cpi->version_num = 1; /* XXX??? */
1316 cpi->hba_inquiry = PI_TAG_ABLE;
1317 cpi->target_sprt = PIT_PROCESSOR
1320 cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */
1321 cpi->hba_misc = PIM_NOINITIATOR | PIM_NOBUSRESET |
1323 cpi->hba_eng_cnt = 0;
1324 cpi->max_target = 7; /* XXX */
1325 cpi->max_lun = MAX_LUN - 1;
1326 cpi->initiator_id = 7; /* XXX */
1327 cpi->bus_id = sim->bus_id;
/* S400 = 400 Mb/s expressed in KB/s as CAM expects. */
1328 cpi->base_transfer_speed = 400 * 1000 / 8;
1329 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1330 strlcpy(cpi->hba_vid, "SBP_TARG", HBA_IDLEN);
1331 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
1332 cpi->unit_number = sim->unit_number;
1334 cpi->ccb_h.status = CAM_REQ_CMP;
1340 union ccb *accb = ccb->cab.abort_ccb;
1342 switch (accb->ccb_h.func_code) {
1343 case XPT_ACCEPT_TARGET_IO:
1344 case XPT_IMMEDIATE_NOTIFY:
1345 ccb->ccb_h.status = sbp_targ_abort_ccb(sc, ccb);
1347 case XPT_CONT_TARGET_IO:
1349 ccb->ccb_h.status = CAM_UA_ABORT;
1352 printf("%s: aborting unknown function %d\n",
1353 __func__, accb->ccb_h.func_code);
1354 ccb->ccb_h.status = CAM_REQ_INVALID;
1360 #ifdef CAM_NEW_TRAN_CODE
1361 case XPT_SET_TRAN_SETTINGS:
1362 ccb->ccb_h.status = CAM_REQ_INVALID;
1365 case XPT_GET_TRAN_SETTINGS:
1367 struct ccb_trans_settings *cts = &ccb->cts;
1368 struct ccb_trans_settings_scsi *scsi =
1369 &cts->proto_specific.scsi;
1370 struct ccb_trans_settings_spi *spi =
1371 &cts->xport_specific.spi;
1373 cts->protocol = PROTO_SCSI;
1374 cts->protocol_version = SCSI_REV_2;
1375 cts->transport = XPORT_FW; /* should have a FireWire */
1376 cts->transport_version = 2;
1377 spi->valid = CTS_SPI_VALID_DISC;
1378 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
1379 scsi->valid = CTS_SCSI_VALID_TQ;
1380 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1382 printf("%s:%d:%d XPT_GET_TRAN_SETTINGS:\n",
1383 device_get_nameunit(sc->fd.dev),
1384 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
1386 cts->ccb_h.status = CAM_REQ_CMP;
1393 printf("%s: unknown function 0x%x\n",
1394 __func__, ccb->ccb_h.func_code);
1395 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/*
 * CAM action entry point; thin wrapper delegating to sbp_targ_action1
 * (locking, if any, is on elided lines of this listing).
 */
1403 sbp_targ_action(struct cam_sim *sim, union ccb *ccb)
1408 sbp_targ_action1(sim, ccb);
/* CAM poll entry point: nothing to poll, completions are interrupt-driven. */
1413 sbp_targ_poll(struct cam_sim *sim)
/*
 * Completion handler for a fetched command-block ORB: validate the
 * transfer, byte-swap the ORB header, fill in the pending ATIO (tag,
 * initiator id, CDB copied per SCSI opcode group), chain-fetch the
 * next ORB if the ORB's next_ORB pointer is valid, and complete the
 * ATIO back to CAM.
 */
1420 sbp_targ_cmd_handler(struct fw_xfer *xfer)
1425 struct orb_info *orbi;
1426 struct ccb_accept_tio *atio;
1430 orbi = (struct orb_info *)xfer->sc;
1431 if (xfer->resp != 0) {
1432 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1433 orbi->status.resp = SBP_TRANS_FAIL;
1434 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1435 orbi->status.dead = 1;
1436 orbi->status.len = 1;
1437 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1439 sbp_targ_status_FIFO(orbi,
1440 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/1);
1444 fp = &xfer->recv.hdr;
1448 if (orbi->state == ORBI_STATUS_ABORTED) {
1449 printf("%s: aborted\n", __func__);
1450 sbp_targ_remove_orb_info(orbi->login, orbi);
1451 free(orbi, M_SBP_TARG);
1452 atio->ccb_h.status = CAM_REQ_ABORTED;
1453 xpt_done((union ccb*)atio);
1456 orbi->state = ORBI_STATUS_ATIO;
1459 /* swap payload except SCSI command */
1460 for (i = 0; i < 5; i++)
1461 orb[i] = ntohl(orb[i]);
1463 orb4 = (struct corb4 *)&orb[4];
1464 if (orb4->rq_fmt != 0) {
1466 printf("%s: rq_fmt(%d) != 0\n", __func__, orb4->rq_fmt);
1469 atio->ccb_h.target_id = 0; /* XXX */
1470 atio->ccb_h.target_lun = orbi->login->lstate->lun;
1471 atio->sense_len = 0;
1472 atio->tag_action = MSG_SIMPLE_TASK;
/* tag_id carries the ORB low address; init_id carries the login id. */
1473 atio->tag_id = orbi->orb_lo;
1474 atio->init_id = orbi->login->id;
1476 atio->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1477 bytes = (u_char *)&orb[5];
1479 printf("%s: %p %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1480 __func__, (void *)atio,
1481 bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
1482 bytes[5], bytes[6], bytes[7], bytes[8], bytes[9]);
/* Group code (top 3 bits of the opcode) selects the CDB length. */
1483 switch (bytes[0] >> 5) {
1499 /* Only copy the opcode. */
1501 printf("Reserved or VU command code type encountered\n");
1505 memcpy(atio->cdb_io.cdb_bytes, bytes, atio->cdb_len);
1507 atio->ccb_h.status |= CAM_CDB_RECVD;
/* Bit 31 clear in orb[0] means a next-ORB pointer is present: chain-fetch. */
1510 if ((orb[0] & (1<<31)) == 0) {
1512 printf("%s: fetch next orb\n", __func__);
1513 orbi->status.src = SRC_NEXT_EXISTS;
1514 sbp_targ_fetch_orb(orbi->sc, orbi->fwdev,
1515 orb[0], orb[1], orbi->login, FETCH_CMD);
1517 orbi->status.src = SRC_NO_NEXT;
1518 orbi->login->flags &= ~F_LINK_ACTIVE;
1521 orbi->data_hi = orb[2];
1522 orbi->data_lo = orb[3];
1525 xpt_done((union ccb*)atio);
/*
 * Find the existing login for (fwdev, lun), or allocate and register a
 * new one in the first free slot of sc->logins[].
 * NOTE(review): several lines are elided in this listing (e.g. the
 * return-on-match, error returns, and login->id assignment are not
 * visible); returns NULL on table-full or allocation failure per the
 * visible printf paths — confirm against the full source.
 */
1531 static struct sbp_targ_login *
1532 sbp_targ_get_login(struct sbp_targ_softc *sc, struct fw_device *fwdev, int lun)
1534 struct sbp_targ_lstate *lstate;
1535 struct sbp_targ_login *login;
1538 lstate = sc->lstate[lun];
/* Reuse an existing login from the same initiator device. */
1540 STAILQ_FOREACH(login, &lstate->logins, link)
1541 if (login->fwdev == fwdev)
/* Find a free slot in the global login table. */
1544 for (i = 0; i < MAX_LOGINS; i++)
1545 if (sc->logins[i] == NULL)
1548 printf("%s: increase MAX_LOGIN\n", __func__);
1552 login = (struct sbp_targ_login *)malloc(
1553 sizeof(struct sbp_targ_login), M_SBP_TARG, M_NOWAIT | M_ZERO);
1555 if (login == NULL) {
1556 printf("%s: malloc failed\n", __func__);
/* All-ones last_hi/last_lo means "no previous ORB pointer" (see the
 * DOORBELL handling in sbp_targ_cmd()). */
1561 login->fwdev = fwdev;
1562 login->lstate = lstate;
1563 login->last_hi = 0xffff;
1564 login->last_lo = 0xffffffff;
1565 login->hold_sec = 1;
1566 STAILQ_INIT(&login->orbs);
1567 CALLOUT_INIT(&login->hold_callout);
1568 sc->logins[i] = login;
/*
 * Completion handler for the asynchronous read of a management ORB.
 * Dispatches on the ORB function field (LOGIN / RECONNECT / LOGOUT /
 * others) and finishes by writing a status block to the FIFO address
 * carried in orb[6]/orb[7].
 * NOTE(review): this listing elides lines (original numbering gaps), so
 * local declarations, case labels/breaks and braces are not all visible.
 */
1573 sbp_targ_mgm_handler(struct fw_xfer *xfer)
1575 struct sbp_targ_lstate *lstate;
1576 struct sbp_targ_login *login;
1580 struct orb_info *orbi;
1583 orbi = (struct orb_info *)xfer->sc;
1584 if (xfer->resp != 0) {
/* Transport-level failure fetching the management ORB. */
1585 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1586 orbi->status.resp = SBP_TRANS_FAIL;
1587 orbi->status.status = OBJ_ORB | SBE_TIMEOUT/*XXX*/;
1588 orbi->status.dead = 1;
1589 orbi->status.len = 1;
1590 sbp_targ_abort(orbi->sc, STAILQ_NEXT(orbi, link));
1592 sbp_targ_status_FIFO(orbi,
1593 orbi->login->fifo_hi, orbi->login->fifo_lo, /*dequeue*/0);
1597 fp = &xfer->recv.hdr;
/* Management ORBs are fully byte-swapped (no embedded CDB). */
1601 for (i = 0; i < 8; i++) {
1602 orb[i] = ntohl(orb[i]);
1604 orb4 = (struct morb4 *)&orb[4];
1606 printf("%s: %s\n", __func__, orb_fun_name[orb4->fun]);
1608 orbi->status.src = SRC_NO_NEXT;
/* The ORB_FUN_* constants are defined pre-shifted by 16. */
1610 switch (orb4->fun << 16) {
1613 int exclusive = 0, lun;
1615 if (orb[4] & ORB_EXV)
1619 lstate = orbi->sc->lstate[lun];
/* Deny login for invalid LUNs or (per the elided exclusivity test)
 * when another initiator already holds the LUN. */
1621 if (lun >= MAX_LUN || lstate == NULL ||
1623 STAILQ_FIRST(&lstate->logins) != NULL &&
1624 STAILQ_FIRST(&lstate->logins)->fwdev != orbi->fwdev)
1627 orbi->status.dead = 1;
1628 orbi->status.status = STATUS_ACCESS_DENY;
1629 orbi->status.len = 1;
1633 /* allocate login */
1634 login = sbp_targ_get_login(orbi->sc, orbi->fwdev, lun);
1635 if (login == NULL) {
1636 printf("%s: sbp_targ_get_login failed\n",
1638 orbi->status.dead = 1;
1639 orbi->status.status = STATUS_RES_UNAVAIL;
1640 orbi->status.len = 1;
1643 printf("%s: login id=%d\n", __func__, login->id);
/* Record the initiator's status FIFO and build the login response. */
1645 login->fifo_hi = orb[6];
1646 login->fifo_lo = orb[7];
1647 login->loginres.len = htons(sizeof(uint32_t) * 4);
1648 login->loginres.id = htons(login->id);
1649 login->loginres.cmd_hi = htons(SBP_TARG_BIND_HI);
1650 login->loginres.cmd_lo = htonl(SBP_TARG_BIND_LO(login->id));
1651 login->loginres.recon_hold = htons(login->hold_sec);
1653 STAILQ_INSERT_TAIL(&lstate->logins, login, link);
/* Write the login response block back to the address in orb[2]/orb[3]. */
1654 fwmem_write_block(orbi->fwdev, NULL, /*spd*/FWSPD_S400, orb[2], orb[3],
1655 sizeof(struct sbp_login_res), (void *)&login->loginres,
1656 fw_asy_callback_free);
1657 /* XXX return status after loginres is successfully written */
/* RECONNECT: only honored from the same device that logged in. */
1661 login = orbi->sc->logins[orb4->id];
1662 if (login != NULL && login->fwdev == orbi->fwdev) {
1663 login->flags &= ~F_HOLD;
1664 callout_stop(&login->hold_callout);
1665 printf("%s: reconnected id=%d\n",
1666 __func__, login->id);
1668 orbi->status.dead = 1;
1669 orbi->status.status = STATUS_ACCESS_DENY;
1670 printf("%s: reconnection failed id=%d\n",
1671 __func__, orb4->id);
/* LOGOUT: tear down the login unless it belongs to another device. */
1675 login = orbi->sc->logins[orb4->id];
1676 if (login->fwdev != orbi->fwdev) {
1677 printf("%s: wrong initiator\n", __func__);
1680 sbp_targ_dealloc_login(login);
1683 printf("%s: %s not implemented yet\n",
1684 __func__, orb_fun_name[orb4->fun]);
1687 orbi->status.len = 1;
1688 sbp_targ_status_FIFO(orbi, orb[6], orb[7], /*dequeue*/0);
/*
 * Completion handler for reading an ORB pointer (2 quadlets).  If the
 * pointer is valid (bit 31 of orb0 clear), fetch the command ORB it
 * points to; the orb_info for the pointer itself is then freed.
 * NOTE(review): elided lines hide the error-path cleanup and braces.
 */
1694 sbp_targ_pointer_handler(struct fw_xfer *xfer)
1696 struct orb_info *orbi;
1697 uint32_t orb0, orb1;
1699 orbi = (struct orb_info *)xfer->sc;
1700 if (xfer->resp != 0) {
1701 printf("%s: xfer->resp = %d\n", __func__, xfer->resp);
1705 orb0 = ntohl(orbi->orb[0]);
1706 orb1 = ntohl(orbi->orb[1]);
/* Bit 31 set marks a NULL/invalid ORB pointer. */
1707 if ((orb0 & (1U << 31)) != 0) {
1708 printf("%s: invalid pointer\n", __func__);
/* The low 16 bits of orb0 carry the high part of the ORB address. */
1711 sbp_targ_fetch_orb(orbi->login->lstate->sc, orbi->fwdev,
1712 (uint16_t)orb0, orb1, orbi->login, FETCH_CMD);
1714 free(orbi, M_SBP_TARG);
/*
 * Start an asynchronous block read of an ORB at orb_hi:orb_lo on behalf
 * of the given login.  `mode` selects the completion handler:
 *   FETCH_MGM     - 8 quadlets -> sbp_targ_mgm_handler
 *   FETCH_CMD     - 8 quadlets -> sbp_targ_cmd_handler (needs a free ATIO)
 *   FETCH_POINTER - 2 quadlets -> sbp_targ_pointer_handler
 * NOTE(review): case labels and some statements are elided in this
 * listing; the mode boundaries below are inferred from the handlers used.
 */
1720 sbp_targ_fetch_orb(struct sbp_targ_softc *sc, struct fw_device *fwdev,
1721 uint16_t orb_hi, uint32_t orb_lo, struct sbp_targ_login *login,
1724 struct orb_info *orbi;
1727 printf("%s: fetch orb %04x:%08x\n", __func__, orb_hi, orb_lo);
1728 orbi = malloc(sizeof(struct orb_info), M_SBP_TARG, M_NOWAIT | M_ZERO);
1730 printf("%s: malloc failed\n", __func__);
1734 orbi->fwdev = fwdev;
1735 orbi->login = login;
1736 orbi->orb_hi = orb_hi;
1737 orbi->orb_lo = orb_lo;
/* Pre-fill the status block's ORB address in wire (big-endian) order. */
1738 orbi->status.orb_hi = htons(orb_hi);
1739 orbi->status.orb_lo = htonl(orb_lo);
1740 orbi->page_table = NULL;
/* Management ORB: 8 quadlets, completed by the management handler. */
1744 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1745 sizeof(uint32_t) * 8, &orbi->orb[0],
1746 sbp_targ_mgm_handler);
/* Command ORB: remember the last fetched address for DOORBELL replay. */
1749 orbi->state = ORBI_STATUS_FETCH;
1750 login->last_hi = orb_hi;
1751 login->last_lo = orb_lo;
1752 login->flags |= F_LINK_ACTIVE;
/* A pre-posted ATIO is required to receive the command; if none is
 * available, mark both lstate and login ATIO-starved and stall. */
1755 orbi->atio = (struct ccb_accept_tio *)
1756 SLIST_FIRST(&login->lstate->accept_tios);
1757 if (orbi->atio == NULL) {
1759 printf("%s: no free atio\n", __func__);
1760 login->lstate->flags |= F_ATIO_STARVED;
1761 login->flags |= F_ATIO_STARVED;
1764 login->fwdev = fwdev;
1768 SLIST_REMOVE_HEAD(&login->lstate->accept_tios, sim_links.sle);
1769 STAILQ_INSERT_TAIL(&login->orbs, orbi, link);
1771 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1772 sizeof(uint32_t) * 8, &orbi->orb[0],
1773 sbp_targ_cmd_handler);
/* ORB pointer: only 2 quadlets are read. */
1776 orbi->state = ORBI_STATUS_POINTER;
1777 login->flags |= F_LINK_ACTIVE;
1778 fwmem_read_block(fwdev, (void *)orbi, /*spd*/FWSPD_S400, orb_hi, orb_lo,
1779 sizeof(uint32_t) * 2, &orbi->orb[0],
1780 sbp_targ_pointer_handler);
1783 printf("%s: invalid mode %d\n", __func__, mode);
/*
 * Completion callback for the write response sent by sbp_targ_recv():
 * recycles the fw_xfer back onto the receive binding's xfer list so it
 * can accept the next incoming request.
 */
1788 sbp_targ_resp_callback(struct fw_xfer *xfer)
1790 struct sbp_targ_softc *sc;
1794 printf("%s: xfer=%p\n", __func__, xfer);
1795 sc = (struct sbp_targ_softc *)xfer->sc;
/* Reset the xfer for receive and re-arm it with the receive handler. */
1796 fw_xfer_unload(xfer);
1797 xfer->recv.pay_len = SBP_TARG_RECV_LEN;
1798 xfer->hand = sbp_targ_recv;
1800 STAILQ_INSERT_TAIL(&sc->fwb.xferlist, xfer, link);
/*
 * Handle a write to a per-login command agent register.  `reg` is the
 * byte offset within the agent register file; returns a FireWire
 * response code (RESP_* value via rtcode).
 * NOTE(review): elided lines hide the login==NULL test, case breaks and
 * the final return; register offsets follow the SBP-2 agent layout.
 */
1805 sbp_targ_cmd(struct fw_xfer *xfer, struct fw_device *fwdev, int login_id,
1808 struct sbp_targ_login *login;
1809 struct sbp_targ_softc *sc;
/* Reject out-of-range login ids before indexing sc->logins[]. */
1812 if (login_id < 0 || login_id >= MAX_LOGINS)
1813 return (RESP_ADDRESS_ERROR);
1815 sc = (struct sbp_targ_softc *)xfer->sc;
1816 login = sc->logins[login_id];
1818 return (RESP_ADDRESS_ERROR);
/* Only the initiator that owns this login may poke its agent. */
1820 if (login->fwdev != fwdev) {
1822 return (RESP_ADDRESS_ERROR);
1826 case 0x08: /* ORB_POINTER */
1828 printf("%s: ORB_POINTER(%d)\n", __func__, login_id);
1829 if ((login->flags & F_LINK_ACTIVE) != 0) {
1831 printf("link active (ORB_POINTER)\n");
/* Payload carries the 64-bit ORB address: hi in [0], lo in [1]. */
1834 sbp_targ_fetch_orb(sc, fwdev,
1835 ntohl(xfer->recv.payload[0]),
1836 ntohl(xfer->recv.payload[1]),
1839 case 0x04: /* AGENT_RESET */
1841 printf("%s: AGENT RESET(%d)\n", __func__, login_id);
/* Forget the last ORB pointer and abort everything queued. */
1842 login->last_hi = 0xffff;
1843 login->last_lo = 0xffffffff;
1844 sbp_targ_abort(sc, STAILQ_FIRST(&login->orbs));
1846 case 0x10: /* DOORBELL */
1848 printf("%s: DOORBELL(%d)\n", __func__, login_id);
/* DOORBELL re-fetches from the last pointer; all-ones means none. */
1849 if (login->last_hi == 0xffff &&
1850 login->last_lo == 0xffffffff) {
1851 printf("%s: no previous pointer(DOORBELL)\n",
1855 if ((login->flags & F_LINK_ACTIVE) != 0) {
1857 printf("link active (DOORBELL)\n");
1860 sbp_targ_fetch_orb(sc, fwdev,
1861 login->last_hi, login->last_lo,
1862 login, FETCH_POINTER);
1864 case 0x00: /* AGENT_STATE */
1865 printf("%s: AGENT_STATE (%d:ignore)\n", __func__, login_id);
1867 case 0x14: /* UNSOLICITED_STATE_ENABLE */
1868 printf("%s: UNSOLICITED_STATE_ENABLE (%d:ignore)\n",
1869 __func__, login_id);
1872 printf("%s: invalid register %d(%d)\n",
1873 __func__, reg, login_id);
1874 rtcode = RESP_ADDRESS_ERROR;
/*
 * Handle a write to the management agent address: must be a block write
 * (WREQB) carrying the 64-bit address of a management ORB, which is then
 * fetched with FETCH_MGM.  Returns a FireWire response code.
 * NOTE(review): elided lines hide additional validation and the return.
 */
1881 sbp_targ_mgm(struct fw_xfer *xfer, struct fw_device *fwdev)
1883 struct sbp_targ_softc *sc;
1886 sc = (struct sbp_targ_softc *)xfer->sc;
1888 fp = &xfer->recv.hdr;
1889 if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
1890 printf("%s: tcode = %d\n", __func__, fp->mode.wreqb.tcode);
1891 return (RESP_TYPE_ERROR);
/* payload[0]/payload[1] hold the management ORB address (hi/lo). */
1894 sbp_targ_fetch_orb(sc, fwdev,
1895 ntohl(xfer->recv.payload[0]),
1896 ntohl(xfer->recv.payload[1]),
/*
 * Receive handler for all writes into the target's bound address range.
 * Resolves the source node, routes the request to the management agent
 * or a per-login command agent based on the destination offset, then
 * sends the write response with the resulting rtcode.
 * NOTE(review): elided lines hide local declarations and some labels.
 */
1903 sbp_targ_recv(struct fw_xfer *xfer)
1905 struct fw_pkt *fp, *sfp;
1906 struct fw_device *fwdev;
1909 struct sbp_targ_softc *sc;
1912 sc = (struct sbp_targ_softc *)xfer->sc;
1913 fp = &xfer->recv.hdr;
/* src & 0x3f extracts the 6-bit node id from the source field. */
1914 fwdev = fw_noderesolve_nodeid(sc->fd.fc, fp->mode.wreqb.src & 0x3f);
1915 if (fwdev == NULL) {
1916 printf("%s: cannot resolve nodeid=%d\n",
1917 __func__, fp->mode.wreqb.src & 0x3f);
1918 rtcode = RESP_TYPE_ERROR; /* XXX */
/* Route by destination offset: management agent vs. command agents. */
1921 lo = fp->mode.wreqb.dest_lo;
1923 if (lo == SBP_TARG_BIND_LO(-1))
1924 rtcode = sbp_targ_mgm(xfer, fwdev);
1925 else if (lo >= SBP_TARG_BIND_LO(0))
1926 rtcode = sbp_targ_cmd(xfer, fwdev, SBP_TARG_LOGIN_ID(lo),
1929 rtcode = RESP_ADDRESS_ERROR;
1933 printf("%s: rtcode = %d\n", __func__, rtcode);
/* Build and send the write response back to the requester. */
1934 sfp = &xfer->send.hdr;
1935 xfer->send.spd = FWSPD_S400;
1936 xfer->hand = sbp_targ_resp_callback;
1937 sfp->mode.wres.dst = fp->mode.wreqb.src;
1938 sfp->mode.wres.tlrt = fp->mode.wreqb.tlrt;
1939 sfp->mode.wres.tcode = FWTCODE_WRES;
1940 sfp->mode.wres.rtcode = rtcode;
1941 sfp->mode.wres.pri = 0;
1943 fw_asyreq(xfer->fc, -1, xfer);
/*
 * Device attach: initializes the softc, registers a CAM SIM/bus/path
 * for target mode, and binds the SBP-2 address range with a list of
 * pre-allocated receive xfers.
 * NOTE(review): elided lines hide return statements and error labels;
 * the cam_sim_free at the end appears to be an error-path cleanup.
 */
1948 sbp_targ_attach(device_t dev)
1950 struct sbp_targ_softc *sc;
1951 struct cam_devq *devq;
1952 struct firewire_comm *fc;
1954 sc = (struct sbp_targ_softc *) device_get_softc(dev);
1955 bzero((void *)sc, sizeof(struct sbp_targ_softc));
1957 mtx_init(&sc->mtx, "sbp_targ", NULL, MTX_DEF);
/* Hook firewire bus-event callbacks. */
1958 sc->fd.fc = fc = device_get_ivars(dev);
1960 sc->fd.post_explore = (void *) sbp_targ_post_explore;
1961 sc->fd.post_busreset = (void *) sbp_targ_post_busreset;
1963 devq = cam_simq_alloc(/*maxopenings*/MAX_LUN*MAX_INITIATORS);
1967 sc->sim = cam_sim_alloc(sbp_targ_action, sbp_targ_poll,
1968 "sbp_targ", sc, device_get_unit(dev), &sc->mtx,
1969 /*untagged*/ 1, /*tagged*/ 1, devq);
1970 if (sc->sim == NULL) {
1971 cam_simq_free(devq);
1976 if (xpt_bus_register(sc->sim, dev, /*bus*/0) != CAM_SUCCESS)
1979 if (xpt_create_path(&sc->path, /*periph*/ NULL, cam_sim_path(sc->sim),
1980 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1981 xpt_bus_deregister(cam_sim_path(sc->sim));
/* Bind the SBP-2 management/command agent address window. */
1986 sc->fwb.start = SBP_TARG_BIND_START;
1987 sc->fwb.end = SBP_TARG_BIND_END;
1989 /* pre-allocate xfer */
1990 STAILQ_INIT(&sc->fwb.xferlist);
1991 fw_xferlist_add(&sc->fwb.xferlist, M_SBP_TARG,
1992 /*send*/ 0, /*recv*/ SBP_TARG_RECV_LEN, MAX_LUN /* XXX */,
1993 fc, (void *)sc, sbp_targ_recv);
1994 fw_bindadd(fc, &sc->fwb);
/* Error path: release the SIM (and its devq) on failure. */
1999 cam_sim_free(sc->sim, /*free_devq*/TRUE);
/*
 * Device detach: tears down CAM registration, frees all per-LUN state
 * and the black-hole lstate, removes the address binding and its xfers,
 * and destroys the softc mutex.
 */
2004 sbp_targ_detach(device_t dev)
2006 struct sbp_targ_softc *sc;
2007 struct sbp_targ_lstate *lstate;
2010 sc = (struct sbp_targ_softc *)device_get_softc(dev);
/* Stop bus-reset callbacks before dismantling state. */
2011 sc->fd.post_busreset = NULL;
2014 xpt_free_path(sc->path);
2015 xpt_bus_deregister(cam_sim_path(sc->sim));
2016 cam_sim_free(sc->sim, /*free_devq*/TRUE);
/* Release every allocated per-LUN lstate. */
2019 for (i = 0; i < MAX_LUN; i++) {
2020 lstate = sc->lstate[i];
2021 if (lstate != NULL) {
2022 xpt_free_path(lstate->path);
2023 free(lstate, M_SBP_TARG);
2026 if (sc->black_hole != NULL) {
2027 xpt_free_path(sc->black_hole->path);
2028 free(sc->black_hole, M_SBP_TARG);
2031 fw_bindremove(sc->fd.fc, &sc->fwb);
2032 fw_xferlist_remove(&sc->fwb.xferlist);
2034 mtx_destroy(&sc->mtx);
/* newbus glue: device method table, driver declaration, and module
 * registration/dependency metadata for sbp_targ on the firewire bus. */
2039 static devclass_t sbp_targ_devclass;
2041 static device_method_t sbp_targ_methods[] = {
2042 /* device interface */
2043 DEVMETHOD(device_identify, sbp_targ_identify),
2044 DEVMETHOD(device_probe, sbp_targ_probe),
2045 DEVMETHOD(device_attach, sbp_targ_attach),
2046 DEVMETHOD(device_detach, sbp_targ_detach),
2050 static driver_t sbp_targ_driver = {
2053 sizeof(struct sbp_targ_softc),
2056 DRIVER_MODULE(sbp_targ, firewire, sbp_targ_driver, sbp_targ_devclass, 0, 0);
2057 MODULE_VERSION(sbp_targ, 1);
/* Depends on the firewire stack and the CAM subsystem. */
2058 MODULE_DEPEND(sbp_targ, firewire, 1, 1, 1);
2059 MODULE_DEPEND(sbp_targ, cam, 1, 1, 1);