/*
 * SCSI Disk Emulator
 *
 * Copyright (c) 2002 Nate Lawson.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <aio.h>
#include <assert.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/param.h>
#include <sys/disk.h>
#include <cam/cam_queue.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_targetio.h>
#include <cam/scsi/scsi_message.h>
#include "scsi_target.h"

/* Maximum amount to transfer per CTIO */
#define MAX_XFER	MAXPHYS
/* Maximum number of allocated CTIOs */
#define MAX_CTIOS	64
/* Maximum sector size for emulated volume */
#define MAX_SECTOR	32768

/* Global variables */
int		debug;
off_t		volume_size;
u_int		sector_size;
size_t		buf_size;

/* Local variables */
static int	targ_fd;
static int	kq_fd;
static int	file_fd;
static int	num_ctios;
static struct ccb_queue		pending_queue;
static struct ccb_queue		work_queue;
static struct ioc_enable_lun	ioc_enlun = {
	CAM_BUS_WILDCARD,
	CAM_TARGET_WILDCARD,
	CAM_LUN_WILDCARD
};
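
/*
 * CCB flow: CCBs handed to the SIM via send_ccb() wait on pending_queue
 * until the kernel passes them back through read(2) on the targ(4) control
 * device; handle_read() then moves them to work_queue, from which
 * request_loop() performs one unit of work per loop iteration.
 */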

static void		cleanup(void);
static int		init_ccbs(void);
static void		request_loop(void);
static void		handle_read(void);
/* Prototypes for the global work_atio() and free_ccb() are in scsi_target.h */
/* static int		work_atio(struct ccb_accept_tio *); */
static void		queue_io(struct ccb_scsiio *);
static void		run_queue(struct ccb_accept_tio *);
static int		work_inot(struct ccb_immed_notify *);
static struct ccb_scsiio *
			get_ctio(void);
/* static void		free_ccb(union ccb *); */
static cam_status	get_sim_flags(u_int16_t *);
static void		rel_simq(void);
static void		abort_all_pending(void);
static void		usage(void);

int
main(int argc, char *argv[])
{
	int ch, unit;
	char *file_name, targname[16];
	u_int16_t req_flags, sim_flags;
	off_t user_size;

	/* Initialize */
	debug = 0;
	req_flags = sim_flags = 0;
	user_size = 0;
	targ_fd = file_fd = kq_fd = -1;
	num_ctios = 0;
	sector_size = SECTOR_SIZE;
	buf_size = DFLTPHYS;

	/* Prepare resource pools */
	TAILQ_INIT(&pending_queue);
	TAILQ_INIT(&work_queue);

	while ((ch = getopt(argc, argv, "AdSTb:c:s:W:")) != -1) {
		switch (ch) {
		case 'A':
			req_flags |= SID_Addr16;
			break;
		case 'd':
			debug = 1;
			break;
		case 'S':
			req_flags |= SID_Sync;
			break;
		case 'T':
			req_flags |= SID_CmdQue;
			break;
		case 'b':
			buf_size = atoi(optarg);
			if (buf_size < 256 || buf_size > MAX_XFER)
				errx(1, "Unreasonable buf size: %s", optarg);
			break;
		case 'c':
			sector_size = atoi(optarg);
			if (sector_size < 512 || sector_size > MAX_SECTOR)
				errx(1, "Unreasonable sector size: %s", optarg);
			break;
		case 's':
		{
			int last, shift = 0;

			last = strlen(optarg) - 1;
			if (last > 0) {
				switch (tolower(optarg[last])) {
				case 'e':
					shift += 10;
					/* FALLTHROUGH */
				case 'p':
					shift += 10;
					/* FALLTHROUGH */
				case 't':
					shift += 10;
					/* FALLTHROUGH */
				case 'g':
					shift += 10;
					/* FALLTHROUGH */
				case 'm':
					shift += 10;
					/* FALLTHROUGH */
				case 'k':
					shift += 10;
					optarg[last] = 0;
					break;
				}
			}
			user_size = strtoll(optarg, (char **)NULL, /*base*/10);
			user_size <<= shift;
			if (user_size < 0)
				errx(1, "Unreasonable volume size: %s", optarg);
			break;
		}
		case 'W':
			req_flags &= ~(SID_WBus16 | SID_WBus32);
			switch (atoi(optarg)) {
			case 8:
				/* Leave req_flags zeroed */
				break;
			case 16:
				req_flags |= SID_WBus16;
				break;
			case 32:
				req_flags |= SID_WBus32;
				break;
			default:
				warnx("Width %s not supported", optarg);
				usage();
				/* NOTREACHED */
			}
			break;
		default:
			usage();
			/* NOTREACHED */
		}
	}
	argc -= optind;
	argv += optind;

	if (argc != 2)
		usage();
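
	/*
	 * Example invocation (hypothetical paths): export a 100 MB
	 * file-backed volume at bus 0, target 1, LUN 0 with tagged
	 * queueing enabled:
	 *
	 *	truncate -s 100m /tmp/vol.img
	 *	scsi_target -T -s 100m 0:1:0 /tmp/vol.img
	 */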

	sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
	       &ioc_enlun.lun_id);
	file_name = argv[1];

	if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
	    ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
	    ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
		warnx("Incomplete target path specified");
		usage();
		/* NOTREACHED */
	}
	/* We don't support any vendor-specific commands */
	ioc_enlun.grp6_len = 0;
	ioc_enlun.grp7_len = 0;

	/* Open backing store for IO */
	file_fd = open(file_name, O_RDWR);
	if (file_fd < 0)
		err(1, "open backing store file");

	/* Check backing store size or use the size user gave us */
	if (user_size == 0) {
		struct stat st;

		if (fstat(file_fd, &st) < 0)
			err(1, "fstat file");
#if __FreeBSD_version >= 500000
		if ((st.st_mode & S_IFCHR) != 0) {
			/* raw device */
			off_t mediasize;

			if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
				err(1, "DIOCGMEDIASIZE");

			/* XXX get sector size by ioctl()?? */
			volume_size = mediasize / sector_size;
		} else
#endif
			volume_size = st.st_size / sector_size;
	} else
		volume_size = user_size / sector_size;

	if (debug)
#if __FreeBSD_version >= 500000
		warnx("volume_size: %d bytes x %jd sectors",
#else
		warnx("volume_size: %d bytes x %lld sectors",
#endif
		    sector_size, volume_size);

	if (volume_size <= 0)
		errx(1, "volume must be larger than %d", sector_size);
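
	/*
	 * Note: st_size is meaningless for a character (raw disk) device, so
	 * its size must come from the DIOCGMEDIASIZE ioctl; for a regular
	 * file, fstat() suffices. Any partial trailing sector is silently
	 * dropped by the integer division.
	 */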

	{
		struct aiocb aio, *aiop;

		/* Make sure we have working AIO support */
		memset(&aio, 0, sizeof(aio));
		aio.aio_buf = malloc(sector_size);
		if (aio.aio_buf == NULL)
			err(1, "malloc");
		aio.aio_fildes = file_fd;
		aio.aio_offset = 0;
		aio.aio_nbytes = sector_size;
		signal(SIGSYS, SIG_IGN);
		if (aio_read(&aio) != 0) {
			printf("You must enable VFS_AIO in your kernel "
			       "or load the aio(4) module.\n");
			err(1, "aio_read");
		}
		if (aio_waitcomplete(&aiop, NULL) != sector_size)
			err(1, "aio_waitcomplete");
		assert(aiop == &aio);
		signal(SIGSYS, SIG_DFL);
		free((void *)aio.aio_buf);
		if (debug)
			warnx("aio support tested ok");
	}
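
	/*
	 * The probe above relies on SIGSYS: if VFS_AIO is absent, the aio
	 * syscalls raise SIGSYS, which would normally kill the process.
	 * With SIGSYS ignored, aio_read() instead fails with an error
	 * return, allowing the friendly diagnostic above rather than
	 * sudden death.
	 */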

	/* Go through all the control devices and find one that isn't busy. */
	unit = 0;
	do {
		snprintf(targname, sizeof(targname), "/dev/targ%d", unit++);
		targ_fd = open(targname, O_RDWR);
	} while (targ_fd < 0 && errno == EBUSY);

	if (targ_fd < 0)
		err(1, "Tried to open %d devices, none available", unit);

	/* The first three are handled by kevent() later */
	signal(SIGHUP, SIG_IGN);
	signal(SIGINT, SIG_IGN);
	signal(SIGTERM, SIG_IGN);
	signal(SIGPROF, SIG_IGN);
	signal(SIGALRM, SIG_IGN);
	signal(SIGSTOP, SIG_IGN);
	signal(SIGTSTP, SIG_IGN);

	/* Register a cleanup handler to run when exiting */
	atexit(cleanup);

	/* Enable listening on the specified LUN */
	if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
		err(1, "TARGIOCENABLE");

	/* Enable debugging if requested */
	if (debug) {
		if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
			err(1, "TARGIOCDEBUG");
	}

	/* Set up inquiry data according to what SIM supports */
	if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
		errx(1, "get_sim_flags");
	if (tcmd_init(req_flags, sim_flags) != 0)
		errx(1, "Initializing tcmd subsystem failed");

	/* Queue ATIOs and INOTs on descriptor */
	if (init_ccbs() != 0)
		errx(1, "init_ccbs failed");

	if (debug)
		warnx("main loop beginning");
	request_loop();

	exit(0);
}

static void
cleanup(void)
{
	struct ccb_hdr *ccb_h;

	if (debug) {
		warnx("cleanup called");
		debug = 0;
		ioctl(targ_fd, TARGIOCDEBUG, &debug);
	}
	ioctl(targ_fd, TARGIOCDISABLE, NULL);
	close(targ_fd);

	while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
		TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
		TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
		free_ccb((union ccb *)ccb_h);
	}
	close(file_fd);
}

/* Allocate ATIOs/INOTs and queue on HBA */
static int
init_ccbs(void)
{
	int i;

	for (i = 0; i < MAX_INITIATORS; i++) {
		struct ccb_accept_tio *atio;
		struct atio_descr *a_descr;
		struct ccb_immed_notify *inot;

		atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
		if (atio == NULL) {
			warn("malloc ATIO");
			return (-1);
		}
		a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
		if (a_descr == NULL) {
			free(atio);
			warn("malloc atio_descr");
			return (-1);
		}
		atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
		atio->ccb_h.targ_descr = a_descr;
		send_ccb((union ccb *)atio, /*priority*/1);

		inot = (struct ccb_immed_notify *)malloc(sizeof(*inot));
		if (inot == NULL) {
			warn("malloc INOT");
			return (-1);
		}
		inot->ccb_h.func_code = XPT_IMMED_NOTIFY;
		send_ccb((union ccb *)inot, /*priority*/1);
	}

	return (0);
}
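
/*
 * Event model: a single kqueue multiplexes all three event sources: the
 * signals registered below (SIGHUP/SIGINT/SIGTERM), readability of the
 * targ(4) control device, and AIO completions delivered via SIGEV_KEVENT
 * (set up in get_ctio()).
 */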
static void
request_loop(void)
{
	struct kevent events[MAX_EVENTS];
	struct timespec ts, *tptr;
	int quit;

	/* Register kqueue for event notification */
	if ((kq_fd = kqueue()) < 0)
		err(1, "init kqueue");

	/* Set up some default events */
	EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
	EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
	if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
		err(1, "kevent signal registration");
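
	/*
	 * A single kevent() call can both submit a changelist and collect
	 * events. The call above is submit-only: the four EV_SETs are
	 * registered and, with nevents == 0, the call returns immediately
	 * without waiting for events.
	 */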

	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	tptr = NULL;
	quit = 0;

	/* Loop until user signal */
	while (quit == 0) {
		int retval, i;
		struct ccb_hdr *ccb_h;

		/* Check for the next signal, read ready, or AIO completion */
		retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
		if (retval < 0) {
			if (errno == EINTR) {
				if (debug)
					warnx("EINTR, looping");
				continue;
			} else {
				err(1, "kevent failed");
			}
		} else if (retval > MAX_EVENTS) {
			errx(1, "kevent returned more events than allocated?");
		}

		/* Process all received events. */
		for (i = 0; i < retval; i++) {
			if ((events[i].flags & EV_ERROR) != 0)
				errx(1, "kevent registration failed");

			switch (events[i].filter) {
			case EVFILT_READ:
				if (debug)
					warnx("read ready");
				handle_read();
				break;
			case EVFILT_AIO:
			{
				struct ccb_scsiio *ctio;
				struct ctio_descr *c_descr;

				if (debug)
					warnx("aio ready");

				ctio = (struct ccb_scsiio *)events[i].udata;
				c_descr = (struct ctio_descr *)
					  ctio->ccb_h.targ_descr;
				c_descr->event = AIO_DONE;
				/* Queue on the appropriate ATIO */
				queue_io(ctio);
				/* Process any queued completions. */
				run_queue(c_descr->atio);
				break;
			}
			case EVFILT_SIGNAL:
				if (debug)
					warnx("signal ready, setting quit");
				quit = 1;
				break;
			default:
				warnx("unknown event %#x", events[i].filter);
				break;
			}
		}

		/* Grab the first CCB and perform one work unit. */
		if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
			union ccb *ccb;

			ccb = (union ccb *)ccb_h;
			switch (ccb_h->func_code) {
			case XPT_ACCEPT_TARGET_IO:
				/* Start one more transfer. */
				retval = work_atio(&ccb->atio);
				break;
			case XPT_IMMED_NOTIFY:
				retval = work_inot(&ccb->cin);
				break;
			default:
				warnx("Unhandled ccb type %#x on workq",
				      ccb_h->func_code);
				abort();
				/* NOTREACHED */
			}

			/* Assume work function handled the exception */
			if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
				if (debug) {
					warnx("Queue frozen receiving CCB, "
					      "releasing");
				}
				rel_simq();
			}

			/* No more work needed for this command. */
			if (retval == 0) {
				TAILQ_REMOVE(&work_queue, ccb_h,
					     periph_links.tqe);
			}
		}

		/*
		 * Poll for new events (i.e. completions) while we
		 * are processing CCBs on the work_queue. Once it's
		 * empty, use an infinite wait.
		 */
		if (!TAILQ_EMPTY(&work_queue))
			tptr = &ts;
		else
			tptr = NULL;
	}
}

/* CCBs are ready from the kernel */
static void
handle_read(void)
{
	union ccb *ccb_array[MAX_INITIATORS], *ccb;
	int ccb_count, i;

	ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
	if (ccb_count <= 0) {
		warn("read ccb ptrs");
		return;
	}
	ccb_count /= sizeof(union ccb *);
	if (ccb_count < 1) {
		warnx("truncated read ccb ptr?");
		return;
	}

	for (i = 0; i < ccb_count; i++) {
		ccb = ccb_array[i];
		TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);

		switch (ccb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		{
			struct ccb_accept_tio *atio;
			struct atio_descr *a_descr;

			/* Initialize ATIO descr for this transaction */
			atio = &ccb->atio;
			a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
			bzero(a_descr, sizeof(*a_descr));
			TAILQ_INIT(&a_descr->cmplt_io);
			a_descr->flags = atio->ccb_h.flags &
				(CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
			/* XXX add a_descr->priority */
			if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
				a_descr->cdb = atio->cdb_io.cdb_bytes;
			else
				a_descr->cdb = atio->cdb_io.cdb_ptr;

			/* ATIOs are processed in FIFO order */
			TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		}
		case XPT_CONT_TARGET_IO:
		{
			struct ccb_scsiio *ctio;
			struct ctio_descr *c_descr;

			ctio = &ccb->ctio;
			c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
			c_descr->event = CTIO_DONE;
			/* Queue on the appropriate ATIO */
			queue_io(ctio);
			/* Process any queued completions. */
			run_queue(c_descr->atio);
			break;
		}
		case XPT_IMMED_NOTIFY:
			/* INOTs are handled with priority */
			TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
					  periph_links.tqe);
			break;
		default:
			warnx("Unhandled ccb type %#x in handle_read",
			      ccb->ccb_h.func_code);
			break;
		}
	}
}
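
/*
 * Note that read(2) on the targ(4) control device transfers CCB pointers,
 * not CCB contents; the CCBs themselves are shared with the kernel. That
 * is why ccb_count is divided by sizeof(union ccb *) above, and why a
 * batch of completions costs only one small read.
 */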

/* Process an ATIO CCB from the kernel */
int
work_atio(struct ccb_accept_tio *atio)
{
	struct ccb_scsiio *ctio;
	struct atio_descr *a_descr;
	struct ctio_descr *c_descr;
	cam_status status;
	int ret;

	if (debug)
		warnx("Working on ATIO %p", atio);

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	/* Get a CTIO and initialize it according to our known parameters */
	ctio = get_ctio();
	if (ctio == NULL)
		return (1);
	ret = 0;
	ctio->ccb_h.flags = a_descr->flags;
	ctio->tag_id = atio->tag_id;
	ctio->init_id = atio->init_id;
	/* XXX priority needs to be added to a_descr */
	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	c_descr->atio = atio;
	if ((a_descr->flags & CAM_DIR_IN) != 0)
		c_descr->offset = a_descr->base_off + a_descr->targ_req;
	else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
		c_descr->offset = a_descr->base_off + a_descr->init_req;
	else
		c_descr->offset = a_descr->base_off;

	/*
	 * Return a check condition if there was an error while
	 * receiving this ATIO.
	 */
	if (atio->sense_len != 0) {
		struct scsi_sense_data *sense;

		if (debug) {
			warnx("ATIO with %u bytes sense received",
			      atio->sense_len);
		}
		sense = &atio->sense_data;
		tcmd_sense(ctio->init_id, ctio, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		send_ccb((union ccb *)ctio, /*priority*/1);
		return (0);
	}

	status = atio->ccb_h.status & CAM_STATUS_MASK;
	switch (status) {
	case CAM_CDB_RECVD:
		ret = tcmd_handle(atio, ctio, ATIO_WORK);
		break;
	case CAM_REQ_ABORTED:
		/* Requeue on HBA */
		TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
		send_ccb((union ccb *)atio, /*priority*/1);
		ret = 1;
		break;
	default:
		warnx("ATIO completed with unhandled status %#x", status);
		abort();
		/* NOTREACHED */
		break;
	}

	return (ret);
}

static void
queue_io(struct ccb_scsiio *ctio)
{
	struct ccb_hdr *ccb_h;
	struct io_queue *ioq;
	struct ctio_descr *c_descr, *curr_descr;

	c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
	/* If the completion is for a specific ATIO, queue in order */
	if (c_descr->atio != NULL) {
		struct atio_descr *a_descr;

		a_descr = (struct atio_descr *)c_descr->atio->ccb_h.targ_descr;
		ioq = &a_descr->cmplt_io;
	} else {
		errx(1, "CTIO %p has NULL ATIO", ctio);
	}

	/* Insert in order, sorted by offset */
	if (!TAILQ_EMPTY(ioq)) {
		TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
			curr_descr = (struct ctio_descr *)ccb_h->targ_descr;
			if (curr_descr->offset <= c_descr->offset) {
				TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h,
						   periph_links.tqe);
				break;
			}
			if (TAILQ_PREV(ccb_h, io_queue, periph_links.tqe)
			    == NULL) {
				TAILQ_INSERT_BEFORE(ccb_h, &ctio->ccb_h,
						    periph_links.tqe);
				break;
			}
		}
	} else {
		TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
	}
}
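
/*
 * The reverse traversal above implements an insertion sort keyed on the
 * backing-store offset. Completions tend to arrive roughly in order, so
 * scanning from the tail usually finds the insertion point immediately.
 */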

/*
 * Go through all completed AIO/CTIOs for a given ATIO and advance data
 * counts, start continuation IO, etc.
 */
static void
run_queue(struct ccb_accept_tio *atio)
{
	struct atio_descr *a_descr;
	struct ccb_hdr *ccb_h;
	int sent_status, event;

	if (atio == NULL)
		return;

	a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;

	while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
		struct ccb_scsiio *ctio;
		struct ctio_descr *c_descr;

		ctio = (struct ccb_scsiio *)ccb_h;
		c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;

		if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			free_ccb((union ccb *)ctio);
			send_ccb((union ccb *)atio, /*priority*/1);
			continue;
		}

		/* If completed item is in range, call handler */
		if ((c_descr->event == AIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->targ_ack)
		 || (c_descr->event == CTIO_DONE &&
		    c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
			sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
			event = c_descr->event;

			TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
				     periph_links.tqe);
			tcmd_handle(atio, ctio, c_descr->event);

			/* If entire transfer complete, send back ATIO */
			if (sent_status != 0 && event == CTIO_DONE)
				send_ccb((union ccb *)atio, /*priority*/1);
		} else {
			/* Gap in offsets so wait until later callback */
			if (debug)
				warnx("IO %p out of order", ccb_h);
			break;
		}
	}
}
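
/*
 * Illustrative scenario (sizes assumed): a 64 KB WRITE split across two
 * 32 KB CTIOs. If the CTIO at offset 32768 completes first, its offset
 * does not yet match base_off + init_ack, so it parks on cmplt_io and the
 * loop breaks; once the offset-0 CTIO completes, both are handled in
 * order and the ATIO goes back after final status is sent.
 */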

static int
work_inot(struct ccb_immed_notify *inot)
{
	cam_status status;
	int sense;

	if (debug)
		warnx("Working on INOT %p", inot);

	status = inot->ccb_h.status;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	switch (status) {
	case CAM_SCSI_BUS_RESET:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
		abort_all_pending();
		break;
	case CAM_BDR_SENT:
		tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
		abort_all_pending();
		break;
	case CAM_MESSAGE_RECV:
		switch (inot->message_args[0]) {
		case MSG_TASK_COMPLETE:
		case MSG_INITIATOR_DET_ERR:
		case MSG_ABORT_TASK_SET:
		case MSG_MESSAGE_REJECT:
		case MSG_NOOP:
		case MSG_PARITY_ERROR:
		case MSG_TARGET_RESET:
		case MSG_ABORT_TASK:
		case MSG_CLEAR_TASK_SET:
		default:
			warnx("INOT message %#x", inot->message_args[0]);
			break;
		}
		break;
	case CAM_REQ_ABORTED:
		warnx("INOT %p aborted", inot);
		break;
	default:
		warnx("Unhandled INOT status %#x", status);
		break;
	}

	/* If there is sense data, use it */
	if (sense != 0) {
		struct scsi_sense_data *sense;

		sense = &inot->sense_data;
		tcmd_sense(inot->initiator_id, NULL, sense->flags,
			   sense->add_sense_code, sense->add_sense_code_qual);
		if (debug)
			warnx("INOT has sense: %#x", sense->flags);
	}

	/* Requeue on SIM */
	TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
	send_ccb((union ccb *)inot, /*priority*/1);

	return (1);
}

void
send_ccb(union ccb *ccb, int priority)
{
	if (debug)
		warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
	ccb->ccb_h.pinfo.priority = priority;
	if (XPT_FC_IS_QUEUED(ccb)) {
		TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
				  periph_links.tqe);
	}
	if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
		warn("write ccb");
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
	}
}
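
/*
 * As in handle_read(), the kernel is given the address of the CCB:
 * sizeof(ccb) here is the size of a pointer, so one write(2) per CCB
 * queues it without copying the CCB body.
 */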

/* Return a CTIO/descr/buf combo from the freelist or malloc one */
static struct ccb_scsiio *
get_ctio(void)
{
	struct ccb_scsiio *ctio;
	struct ctio_descr *c_descr;
	struct sigevent *se;

	if (num_ctios == MAX_CTIOS)
		return (NULL);

	ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
	if (ctio == NULL) {
		warn("malloc CTIO");
		return (NULL);
	}
	c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
	if (c_descr == NULL) {
		free(ctio);
		warn("malloc ctio_descr");
		return (NULL);
	}
	c_descr->buf = malloc(buf_size);
	if (c_descr->buf == NULL) {
		free(c_descr);
		free(ctio);
		warn("malloc backing store");
		return (NULL);
	}
	num_ctios++;

	/* Initialize CTIO, CTIO descr, and AIO */
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.retry_count = 2;
	ctio->ccb_h.timeout = CAM_TIME_INFINITY;
	ctio->data_ptr = c_descr->buf;
	ctio->ccb_h.targ_descr = c_descr;
	c_descr->aiocb.aio_buf = c_descr->buf;
	c_descr->aiocb.aio_fildes = file_fd;
	se = &c_descr->aiocb.aio_sigevent;
	se->sigev_notify = SIGEV_KEVENT;
	se->sigev_notify_kqueue = kq_fd;
	se->sigev_value.sigval_ptr = ctio;

	return (ctio);
}
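
/*
 * SIGEV_KEVENT ties each aiocb to our kqueue: when the AIO completes, an
 * EVFILT_AIO event arrives whose udata carries sigev_value (the CTIO
 * pointer), letting request_loop() resume the right transaction.
 */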

void
free_ccb(union ccb *ccb)
{
	switch (ccb->ccb_h.func_code) {
	case XPT_CONT_TARGET_IO:
	{
		struct ctio_descr *c_descr;

		c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
		free(c_descr->buf);
		num_ctios--;
		/* FALLTHROUGH */
	}
	case XPT_ACCEPT_TARGET_IO:
		free(ccb->ccb_h.targ_descr);
		/* FALLTHROUGH */
	case XPT_IMMED_NOTIFY:
	default:
		free(ccb);
		break;
	}
}

static cam_status
get_sim_flags(u_int16_t *flags)
{
	struct ccb_pathinq cpi;
	cam_status status;

	/* Find SIM capabilities */
	bzero(&cpi, sizeof(cpi));
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	send_ccb((union ccb *)&cpi, /*priority*/1);
	status = cpi.ccb_h.status & CAM_STATUS_MASK;
	if (status != CAM_REQ_CMP) {
		fprintf(stderr, "CPI failed, status %#x\n", status);
		return (status);
	}

	/* Can only enable on controllers that support target mode */
	if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
		fprintf(stderr, "HBA does not support target mode\n");
		status = CAM_PATH_INVALID;
		return (status);
	}

	*flags = cpi.hba_inquiry;
	return (status);
}

static void
rel_simq(void)
{
	struct ccb_relsim crs;

	bzero(&crs, sizeof(crs));
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings = 0;
	crs.release_timeout = 0;
	crs.qfrozen_cnt = 0;
	send_ccb((union ccb *)&crs, /*priority*/0);
}

/* Cancel all pending CCBs. */
static void
abort_all_pending(void)
{
	struct ccb_abort cab;
	struct ccb_hdr *ccb_h;

	if (debug)
		warnx("abort_all_pending");

	bzero(&cab, sizeof(cab));
	cab.ccb_h.func_code = XPT_ABORT;
	TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
		if (debug)
			warnx("Aborting pending CCB %p", ccb_h);
		cab.abort_ccb = (union ccb *)ccb_h;
		send_ccb((union ccb *)&cab, /*priority*/1);
		if (cab.ccb_h.status != CAM_REQ_CMP) {
			warnx("Unable to abort CCB, status %#x",
			      cab.ccb_h.status);
		}
	}
}

static void
usage(void)
{
	fprintf(stderr,
		"Usage: scsi_target [-AdST] [-b bufsize] [-c sectorsize]\n"
		"\t\t[-s volsize] [-W 8,16,32]\n"
		"\t\tbus:target:lun filename\n");
	exit(1);
}