 * Copyright (c) 2002 Nate Lawson.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"
/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
static int		work_atio(struct ccb_accept_tio *);
static void		queue_io(struct ccb_scsiio *);
static int		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
static void		free_ccb(union ccb *);
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);
main(int argc, char *argv[])
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;

	req_flags = sim_flags = 0;
	targ_fd = file_fd = kq_fd = -1;
	sector_size = SECTOR_SIZE;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);
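	/* Parse command-line options; numeric arguments are validated as they are parsed. */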
	while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
			req_flags |= SID_Addr16;
			req_flags |= SID_Sync;
			req_flags |= SID_CmdQue;
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			last = strlen(optarg) - 1;
				switch (tolower(optarg[last])) {
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
				errx(1, "Unreasonable volume size: %s", optarg);
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
				/* Leave req_flags zeroed */
				req_flags |= SID_WBus16;
				req_flags |= SID_WBus32;
				warnx("Width %s not supported", optarg);
	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");

	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
		errx(EX_NOINPUT, "open backing store file");
	/* Check the backing store size, or use the size the user gave us */
	if (user_size == 0) {
		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if ((st.st_mode & S_IFCHR) != 0) {
			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");
			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
			volume_size = st.st_size / sector_size;
		volume_size = user_size / sector_size;
		warnx("volume_size: %d bytes x " OFF_FMT " sectors",
		    sector_size, volume_size);
	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);
		struct aiocb aio, *aiop;

		/* See if we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
		aio.aio_fildes = file_fd;
		aio.aio_nbytes = sector_size;
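		/*
		 * Ignore SIGSYS while probing: on a kernel without AIO
		 * support the aio_read() below simply fails instead of
		 * killing the process, and we fall back to single-threaded
		 * (synchronous) I/O.
		 */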
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("AIO support is not available -- switching to"
			       " single-threaded mode.\n");
			if (aio_waitcomplete(&aiop, NULL) != sector_size)
				err(1, "aio_waitcomplete");
			assert(aiop == &aio);
			signal(SIGSYS, SIG_DFL);
		free((void *)aio.aio_buf);
		if (debug && notaio == 0)
			warnx("aio support tested ok");
	/* Go through all the control devices and find one that isn't busy. */
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);
		errx(1, "Tried to open %d devices, none available", unit);
		warnx("opened %s", targname);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);
	/* Register a cleanup handler to run when exiting */

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			warnx("TARGIOCDEBUG");

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");

	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

		warnx("main loop beginning");
	struct ccb_hdr *ccb_h;

		warnx("cleanup called");
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	ioctl(targ_fd, TARGIOCDISABLE, NULL);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
/* Allocate ATIOs/INOTs and queue on HBA */
	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			warn("malloc atio_descr");
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");
	/* Loop until user signal */
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
			if (errno == EINTR) {
					warnx("EINTR, looping");
				err(1, "kevent failed");
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");

		/* Process all received events. */
		for (oo = i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
				    ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				/* Process any queued completions. */
				oo += run_queue(c_descr->atio);
					warnx("signal ready, setting quit");
				warnx("unknown event %d", events[i].filter);
				warnx("event %d done", events[i].filter);
		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				warnx("Unhandled ccb type %#x on workq",

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
					warnx("Queue frozen receiving CCB, "

				/* No more work needed for this command. */
				TAILQ_REMOVE(&work_queue, ccb_h,

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
/* CCBs are ready from the kernel */
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i, oo;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
	ccb_count /= sizeof(union ccb *);
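	/*
	 * The read on the targ device returns an array of pointers to CCBs
	 * that the kernel has finished with; each one was handed down
	 * earlier via send_ccb() and is still linked on pending_queue.
	 */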
556 warnx("truncated read ccb ptr?");
560 for (i = 0; i < ccb_count; i++) {
562 TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);
		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
			    (CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
		case XPT_CONT_TARGET_IO:
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			/* Process any queued completions. */
			oo += run_queue(c_descr->atio);
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
			warnx("Unhandled ccb type %#x in handle_read",
			    ccb->ccb_h.func_code);
/* Process an ATIO CCB from the kernel */
work_atio(struct ccb_accept_tio *atio)
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;

		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
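	/*
	 * Choose the backing-store offset for this CTIO from the running
	 * transfer counts kept in the ATIO descriptor: targ_req for data-in,
	 * init_req for data-out, and the base offset otherwise.
	 */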
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
		c_descr->offset = a_descr->base_off;

	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

			warnx("ATIO with %u bytes sense received",
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
		    sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);

	status = atio->ccb_h.status & CAM_STATUS_MASK;
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
	case CAM_REQ_ABORTED:
		warn("ATIO %p aborted", a_descr);
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		warnx("ATIO completed with unhandled status %#x", status);
queue_io(struct ccb_scsiio *ctio)
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	if (c_descr->atio == NULL) {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;
	if (TAILQ_EMPTY(ioq)) {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);

	TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
		struct ctio_descr *curr_descr =
		    (struct ctio_descr *)ccb_h->targ_descr;
		if (curr_descr->offset <= c_descr->offset) {
		TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
run_queue(struct ccb_accept_tio *atio)
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
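	/*
	 * Walk the completion list in offset order, handing each contiguous
	 * completion to tcmd_handle(); stop at the first gap in offsets and
	 * wait for a later callback to fill it.
	 */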
	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);

			/* Gap in offsets so wait until later callback */
				warnx("IO %p:%p out of order %s", ccb_h,
				    a_descr, c_descr->event == AIO_DONE?
work_inot(struct ccb_immed_notify *inot)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_CLEAR_TASK_SET:
			warnx("INOT message %#x", inot->message_args[0]);
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		warnx("Unhandled INOT status %#x", status);

	/* If there is sense data, use it */
		struct scsi_sense_data *sense;

		sense = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense->flags,
		    sense->add_sense_code, sense->add_sense_code_qual);
			warnx("INOT has sense: %#x", sense->flags);

	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);
send_ccb(union ccb *ccb, int priority)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
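	/*
	 * Queued function codes (ATIOs, INOTs, CTIOs) are tracked on
	 * pending_queue so cleanup() and abort_all_pending() can find them;
	 * writing the CCB pointer to targ_fd hands it to the kernel either way.
	 */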
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;

	if (num_ctios == MAX_CTIOS) {
		warnx("at CTIO max");
	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		warn("malloc ctio_descr");
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		warn("malloc backing store");

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sival_ptr = ctio;
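	/*
	 * Completion of this CTIO's AIO request is delivered to our kqueue
	 * with the CTIO pointer in udata; request_loop() matches on
	 * EVFILT_AIO and queues the CTIO on its owning ATIO.
	 */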
free_ccb(union ccb *ccb)
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
	case XPT_IMMED_NOTIFY:
get_sim_flags(u_int16_t *flags)
	struct ccb_pathinq cpi;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;

	*flags = cpi.hba_inquiry;
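/*
 * Ask the SIM to release its frozen queue once it drains; called after a
 * completed CCB comes back with CAM_DEV_QFRZN set.
 */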
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.release_timeout = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
/* Cancel all pending CCBs. */
	struct ccb_abort cab;
	struct ccb_hdr *ccb_h;

		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
1001 "Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
1002 "\t\t[-r numbufs] [-s volsize] [-W 8,16,32]\n"
1003 "\t\tbus:target:lun filename\n");