4 * Copyright (c) 2002 Nate Lawson.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/types.h>
46 #include <sys/queue.h>
47 #include <sys/event.h>
48 #include <sys/param.h>
50 #include <cam/cam_queue.h>
51 #include <cam/scsi/scsi_all.h>
52 #include <cam/scsi/scsi_targetio.h>
53 #include <cam/scsi/scsi_message.h>
54 #include "scsi_target.h"
/*
 * NOTE(review): this listing is a sampled/truncated view of the original
 * file; interior lines are missing throughout.  Comments below describe
 * only what the visible code demonstrates.
 */
56 /* Maximum amount to transfer per CTIO */
57 #define MAX_XFER MAXPHYS
58 /* Maximum number of allocated CTIOs */
60 /* Maximum sector size for emulated volume */
61 #define MAX_SECTOR 32768
63 /* Global variables */
/* CCBs queued to the kernel and awaiting completion (drained in cleanup) */
75 static struct ccb_queue pending_queue;
/* CCBs returned by the kernel that still need user-space processing */
76 static struct ccb_queue work_queue;
/* LUN-enable request passed to TARGIOCENABLE; path/target/lun parsed from argv */
77 static struct ioc_enable_lun ioc_enlun = {
/* Forward declarations for the static helpers defined below */
84 static void cleanup(void);
85 static int init_ccbs(void);
86 static void request_loop(void);
87 static void handle_read(void);
88 /* static int work_atio(struct ccb_accept_tio *); */
89 static void queue_io(struct ccb_scsiio *);
90 static int run_queue(struct ccb_accept_tio *);
91 static int work_inot(struct ccb_immediate_notify *);
92 static struct ccb_scsiio *
94 /* static void free_ccb(union ccb *); */
95 static cam_status get_sim_flags(u_int16_t *);
96 static void rel_simq(void);
97 static void abort_all_pending(void);
98 static void usage(void);
/*
 * Entry point: parse command-line options, open and size the backing
 * store, probe for working AIO, open /dev/targ, enable the requested
 * LUN, and (in lines not visible here) enter the request loop.
 * NOTE(review): several option cases and error paths are elided from
 * this listing.
 */
101 main(int argc, char *argv[])
105 u_int16_t req_flags, sim_flags;
110 req_flags = sim_flags = 0;
112 targ_fd = file_fd = kq_fd = -1;
114 sector_size = SECTOR_SIZE;
117 /* Prepare resource pools */
118 TAILQ_INIT(&pending_queue);
119 TAILQ_INIT(&work_queue);
/* Option letters map to SCSI inquiry capability flags and sizing knobs */
121 while ((ch = getopt(argc, argv, "AdSTYb:c:s:W:")) != -1) {
/* -A: advertise 16-bit addressing */
124 req_flags |= SID_Addr16;
/* -S: advertise synchronous transfers */
130 req_flags |= SID_Sync;
/* -T: advertise command queuing */
133 req_flags |= SID_CmdQue;
/* -b: per-CTIO buffer size, bounded to [256, MAX_XFER] */
136 buf_size = atoi(optarg);
137 if (buf_size < 256 || buf_size > MAX_XFER)
138 errx(1, "Unreasonable buf size: %s", optarg);
/* -c: emulated sector size, bounded to [512, MAX_SECTOR] */
141 sector_size = atoi(optarg);
142 if (sector_size < 512 || sector_size > MAX_SECTOR)
143 errx(1, "Unreasonable sector size: %s", optarg);
/* -s: volume size; a trailing k/m/g/t suffix is handled by the
 * (partially elided) switch on the last character */
149 last = strlen(optarg) - 1;
151 switch (tolower(optarg[last])) {
173 user_size = strtoll(optarg, (char **)NULL, /*base*/10);
176 errx(1, "Unreasonable volume size: %s", optarg);
/* -W: bus width; clear both wide bits then set per argument */
180 req_flags &= ~(SID_WBus16 | SID_WBus32);
181 switch (atoi(optarg)) {
183 /* Leave req_flags zeroed */
186 req_flags |= SID_WBus16;
189 req_flags |= SID_WBus32;
192 warnx("Width %s not supported", optarg);
/* First positional argument: bus:target:lun triple */
211 sscanf(argv[0], "%u:%u:%u", &ioc_enlun.path_id, &ioc_enlun.target_id,
/* Wildcards are rejected: a fully-specified path is required */
215 if (ioc_enlun.path_id == CAM_BUS_WILDCARD ||
216 ioc_enlun.target_id == CAM_TARGET_WILDCARD ||
217 ioc_enlun.lun_id == CAM_LUN_WILDCARD) {
218 warnx("Incomplete target path specified");
222 /* We don't support any vendor-specific commands */
223 ioc_enlun.grp6_len = 0;
224 ioc_enlun.grp7_len = 0;
226 /* Open backing store for IO */
227 file_fd = open(file_name, O_RDWR);
229 errx(EX_NOINPUT, "open backing store file");
231 /* Check backing store size or use the size user gave us */
232 if (user_size == 0) {
235 if (fstat(file_fd, &st) < 0)
236 err(1, "fstat file");
237 #if __FreeBSD_version >= 500000
/* Character (disk) devices report size via DIOCGMEDIASIZE, not st_size */
238 if ((st.st_mode & S_IFCHR) != 0) {
241 if (ioctl(file_fd, DIOCGMEDIASIZE, &mediasize) < 0)
242 err(1, "DIOCGMEDIASIZE");
244 /* XXX get sector size by ioctl()?? */
245 volume_size = mediasize / sector_size;
248 volume_size = st.st_size / sector_size;
250 volume_size = user_size / sector_size;
253 warnx("volume_size: %d bytes x " OFF_FMT " sectors",
254 sector_size, volume_size);
256 if (volume_size <= 0)
257 errx(1, "volume must be larger than %d", sector_size);
/* Probe whether kernel AIO works by issuing one real read; SIGSYS is
 * ignored during the probe so a missing aio syscall doesn't kill us */
260 struct aiocb aio, *aiop;
262 /* See if we have we have working AIO support */
263 memset(&aio, 0, sizeof(aio));
264 aio.aio_buf = malloc(sector_size);
265 if (aio.aio_buf == NULL)
267 aio.aio_fildes = file_fd;
269 aio.aio_nbytes = sector_size;
270 signal(SIGSYS, SIG_IGN);
271 if (aio_read(&aio) != 0) {
/* NOTE(review): "switchin" is a typo in the original message text */
272 printf("AIO support is not available- switchin to"
273 " single-threaded mode.\n");
276 if (aio_waitcomplete(&aiop, NULL) != sector_size)
277 err(1, "aio_waitcomplete");
278 assert(aiop == &aio);
279 signal(SIGSYS, SIG_DFL);
281 free((void *)aio.aio_buf);
282 if (debug && notaio == 0)
283 warnx("aio support tested ok");
286 targ_fd = open("/dev/targ", O_RDWR);
290 warnx("opened /dev/targ");
292 /* The first three are handled by kevent() later */
293 signal(SIGHUP, SIG_IGN);
294 signal(SIGINT, SIG_IGN);
295 signal(SIGTERM, SIG_IGN);
296 signal(SIGPROF, SIG_IGN);
297 signal(SIGALRM, SIG_IGN);
298 signal(SIGSTOP, SIG_IGN);
299 signal(SIGTSTP, SIG_IGN);
301 /* Register a cleanup handler to run when exiting */
304 /* Enable listening on the specified LUN */
305 if (ioctl(targ_fd, TARGIOCENABLE, &ioc_enlun) != 0)
306 err(1, "TARGIOCENABLE");
308 /* Enable debugging if requested */
310 if (ioctl(targ_fd, TARGIOCDEBUG, &debug) != 0)
311 warnx("TARGIOCDEBUG");
314 /* Set up inquiry data according to what SIM supports */
315 if (get_sim_flags(&sim_flags) != CAM_REQ_CMP)
316 errx(1, "get_sim_flags");
318 if (tcmd_init(req_flags, sim_flags) != 0)
319 errx(1, "Initializing tcmd subsystem failed");
321 /* Queue ATIOs and INOTs on descriptor */
322 if (init_ccbs() != 0)
323 errx(1, "init_ccbs failed");
326 warnx("main loop beginning");
/*
 * cleanup(): disable target-mode debugging and the LUN, then free every
 * CCB still sitting on the pending and work queues.
 * NOTE(review): the function header line is not visible in this listing.
 */
336 struct ccb_hdr *ccb_h;
339 warnx("cleanup called");
/* Turn off kernel-side debug tracing and stop listening on the LUN */
341 ioctl(targ_fd, TARGIOCDEBUG, &debug);
343 ioctl(targ_fd, TARGIOCDISABLE, NULL);
/* Drain both queues; free_ccb() releases descriptors and buffers too */
346 while ((ccb_h = TAILQ_FIRST(&pending_queue)) != NULL) {
347 TAILQ_REMOVE(&pending_queue, ccb_h, periph_links.tqe);
348 free_ccb((union ccb *)ccb_h);
350 while ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
351 TAILQ_REMOVE(&work_queue, ccb_h, periph_links.tqe);
352 free_ccb((union ccb *)ccb_h);
359 /* Allocate ATIOs/INOTs and queue on HBA */
/*
 * init_ccbs(): for each potential initiator, allocate one ATIO (with its
 * per-transaction descriptor) and one INOT, and queue both to the kernel
 * so the HBA has CCBs ready to receive commands and events.
 * NOTE(review): error-path lines (free on malloc failure, return values)
 * are elided from this listing.
 */
365 for (i = 0; i < MAX_INITIATORS; i++) {
366 struct ccb_accept_tio *atio;
367 struct atio_descr *a_descr;
368 struct ccb_immediate_notify *inot;
370 atio = (struct ccb_accept_tio *)malloc(sizeof(*atio));
375 a_descr = (struct atio_descr *)malloc(sizeof(*a_descr));
376 if (a_descr == NULL) {
378 warn("malloc atio_descr");
/* Attach the descriptor and hand the ATIO to the kernel */
381 atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
382 atio->ccb_h.targ_descr = a_descr;
383 send_ccb((union ccb *)atio, /*priority*/1);
385 inot = (struct ccb_immediate_notify *)malloc(sizeof(*inot));
390 inot->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY;
391 send_ccb((union ccb *)inot, /*priority*/1);
/*
 * request_loop(): main event loop.  A kqueue multiplexes three sources:
 * signals (HUP/INT/TERM -> quit), readability of the targ device (new
 * CCBs from the kernel), and AIO completions (delivered via
 * SIGEV_KEVENT with the CTIO pointer in udata).  Between kevent calls,
 * one work unit is performed per iteration from work_queue.
 * NOTE(review): the function header and several case labels are elided
 * from this listing.
 */
400 struct kevent events[MAX_EVENTS];
401 struct timespec ts, *tptr;
404 /* Register kqueue for event notification */
405 if ((kq_fd = kqueue()) < 0)
406 err(1, "init kqueue");
408 /* Set up some default events */
409 EV_SET(&events[0], SIGHUP, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
410 EV_SET(&events[1], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
411 EV_SET(&events[2], SIGTERM, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0, 0, 0);
412 EV_SET(&events[3], targ_fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
413 if (kevent(kq_fd, events, 4, NULL, 0, NULL) < 0)
414 err(1, "kevent signal registration");
421 /* Loop until user signal */
424 struct ccb_hdr *ccb_h;
426 /* Check for the next signal, read ready, or AIO completion */
427 retval = kevent(kq_fd, NULL, 0, events, MAX_EVENTS, tptr);
/* EINTR just restarts the wait; any other error is fatal */
429 if (errno == EINTR) {
431 warnx("EINTR, looping");
435 err(1, "kevent failed");
437 } else if (retval > MAX_EVENTS) {
438 errx(1, "kevent returned more events than allocated?");
441 /* Process all received events. */
442 for (oo = i = 0; i < retval; i++) {
443 if ((events[i].flags & EV_ERROR) != 0)
444 errx(1, "kevent registration failed");
446 switch (events[i].filter) {
/* AIO completion: udata carries the CTIO set up in get_ctio() */
454 struct ccb_scsiio *ctio;
455 struct ctio_descr *c_descr;
459 ctio = (struct ccb_scsiio *)events[i].udata;
460 c_descr = (struct ctio_descr *)
461 ctio->ccb_h.targ_descr;
462 c_descr->event = AIO_DONE;
463 /* Queue on the appropriate ATIO */
465 /* Process any queued completions. */
466 oo += run_queue(c_descr->atio);
/* Signal event: request loop exit */
471 warnx("signal ready, setting quit");
475 warnx("unknown event %d", events[i].filter);
480 warnx("event %d done", events[i].filter);
488 /* Grab the first CCB and perform one work unit. */
489 if ((ccb_h = TAILQ_FIRST(&work_queue)) != NULL) {
492 ccb = (union ccb *)ccb_h;
493 switch (ccb_h->func_code) {
494 case XPT_ACCEPT_TARGET_IO:
495 /* Start one more transfer. */
496 retval = work_atio(&ccb->atio);
498 case XPT_IMMEDIATE_NOTIFY:
499 retval = work_inot(&ccb->cin1);
502 warnx("Unhandled ccb type %#x on workq",
508 /* Assume work function handled the exception */
509 if ((ccb_h->status & CAM_DEV_QFRZN) != 0) {
511 warnx("Queue frozen receiving CCB, "
517 /* No more work needed for this command. */
519 TAILQ_REMOVE(&work_queue, ccb_h,
525 * Poll for new events (i.e. completions) while we
526 * are processing CCBs on the work_queue. Once it's
527 * empty, use an infinite wait.
529 if (!TAILQ_EMPTY(&work_queue))
536 /* CCBs are ready from the kernel */
/*
 * handle_read(): read an array of completed CCB pointers from the targ
 * device, unlink each from pending_queue, and dispatch by func_code:
 * ATIOs get a freshly initialized descriptor and go to the tail of
 * work_queue (FIFO), completed CTIOs are queued on their ATIO and any
 * ready completions run, and INOTs jump the queue (inserted at head).
 */
540 union ccb *ccb_array[MAX_INITIATORS], *ccb;
541 int ccb_count, i, oo;
/* read() returns a byte count; convert to a pointer count below */
543 ccb_count = read(targ_fd, ccb_array, sizeof(ccb_array));
544 if (ccb_count <= 0) {
545 warn("read ccb ptrs");
548 ccb_count /= sizeof(union ccb *);
550 warnx("truncated read ccb ptr?");
554 for (i = 0; i < ccb_count; i++) {
556 TAILQ_REMOVE(&pending_queue, &ccb->ccb_h, periph_links.tqe);
558 switch (ccb->ccb_h.func_code) {
559 case XPT_ACCEPT_TARGET_IO:
561 struct ccb_accept_tio *atio;
562 struct atio_descr *a_descr;
564 /* Initialize ATIO descr for this transaction */
566 a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
567 bzero(a_descr, sizeof(*a_descr));
568 TAILQ_INIT(&a_descr->cmplt_io);
569 a_descr->flags = atio->ccb_h.flags &
570 (CAM_DIS_DISCONNECT | CAM_TAG_ACTION_VALID);
571 /* XXX add a_descr->priority */
/* CDB may be inline bytes or a pointer, per CAM_CDB_POINTER */
572 if ((atio->ccb_h.flags & CAM_CDB_POINTER) == 0)
573 a_descr->cdb = atio->cdb_io.cdb_bytes;
575 a_descr->cdb = atio->cdb_io.cdb_ptr;
577 /* ATIOs are processed in FIFO order */
578 TAILQ_INSERT_TAIL(&work_queue, &ccb->ccb_h,
582 case XPT_CONT_TARGET_IO:
584 struct ccb_scsiio *ctio;
585 struct ctio_descr *c_descr;
588 c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
589 c_descr->event = CTIO_DONE;
590 /* Queue on the appropriate ATIO */
592 /* Process any queued completions. */
593 oo += run_queue(c_descr->atio);
596 case XPT_IMMEDIATE_NOTIFY:
597 /* INOTs are handled with priority */
598 TAILQ_INSERT_HEAD(&work_queue, &ccb->ccb_h,
602 warnx("Unhandled ccb type %#x in handle_read",
603 ccb->ccb_h.func_code);
609 /* Process an ATIO CCB from the kernel */
/*
 * work_atio(): start one transfer for an accepted SCSI command.  Gets a
 * CTIO, copies tag/initiator identity from the ATIO, computes the file
 * offset for the next chunk (direction-dependent), and hands the pair
 * to tcmd_handle().  An ATIO that arrived carrying sense data instead
 * gets a check-condition CTIO sent back immediately.
 */
611 work_atio(struct ccb_accept_tio *atio)
613 struct ccb_scsiio *ctio;
614 struct atio_descr *a_descr;
615 struct ctio_descr *c_descr;
620 warnx("Working on ATIO %p", atio);
622 a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
624 /* Get a CTIO and initialize it according to our known parameters */
630 ctio->ccb_h.flags = a_descr->flags;
631 ctio->tag_id = atio->tag_id;
632 ctio->init_id = atio->init_id;
633 /* XXX priority needs to be added to a_descr */
634 c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
635 c_descr->atio = atio;
/* Offset advances by what the target (read) or initiator (write) has
 * already requested; non-data commands use the base offset */
636 if ((a_descr->flags & CAM_DIR_IN) != 0)
637 c_descr->offset = a_descr->base_off + a_descr->targ_req;
638 else if ((a_descr->flags & CAM_DIR_MASK) == CAM_DIR_OUT)
639 c_descr->offset = a_descr->base_off + a_descr->init_req;
641 c_descr->offset = a_descr->base_off;
644 * Return a check condition if there was an error while
645 * receiving this ATIO.
647 if (atio->sense_len != 0) {
648 struct scsi_sense_data_fixed *sense;
651 warnx("ATIO with %u bytes sense received",
654 sense = (struct scsi_sense_data_fixed *)&atio->sense_data;
655 tcmd_sense(ctio->init_id, ctio, sense->flags,
656 sense->add_sense_code, sense->add_sense_code_qual);
657 send_ccb((union ccb *)ctio, /*priority*/1);
661 status = atio->ccb_h.status & CAM_STATUS_MASK;
664 ret = tcmd_handle(atio, ctio, ATIO_WORK);
/* Aborted ATIOs are pulled off the work queue and re-queued to the kernel */
666 case CAM_REQ_ABORTED:
667 warn("ATIO %p aborted", a_descr);
669 TAILQ_REMOVE(&work_queue, &atio->ccb_h, periph_links.tqe);
670 send_ccb((union ccb *)atio, /*priority*/1);
674 warnx("ATIO completed with unhandled status %#x", status);
/*
 * queue_io(): insert a completed CTIO into its parent ATIO's cmplt_io
 * list, keeping the list sorted by ascending file offset so run_queue()
 * can retire completions in order.  Scans from the tail since
 * completions usually arrive nearly in order.
 */
684 queue_io(struct ccb_scsiio *ctio)
686 struct ccb_hdr *ccb_h;
687 struct io_queue *ioq;
688 struct ctio_descr *c_descr;
690 c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
691 if (c_descr->atio == NULL) {
692 errx(1, "CTIO %p has NULL ATIO", ctio);
694 ioq = &((struct atio_descr *)c_descr->atio->ccb_h.targ_descr)->cmplt_io;
/* Empty list: trivial insert */
696 if (TAILQ_EMPTY(ioq)) {
697 TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
/* Walk backwards to find the last entry at or below our offset */
701 TAILQ_FOREACH_REVERSE(ccb_h, ioq, io_queue, periph_links.tqe) {
702 struct ctio_descr *curr_descr =
703 (struct ctio_descr *)ccb_h->targ_descr;
704 if (curr_descr->offset <= c_descr->offset) {
710 TAILQ_INSERT_AFTER(ioq, ccb_h, &ctio->ccb_h, periph_links.tqe);
/* Smaller than everything present: new head */
712 TAILQ_INSERT_HEAD(ioq, &ctio->ccb_h, periph_links.tqe);
717 * Go through all completed AIO/CTIOs for a given ATIO and advance data
718 * counts, start continuation IO, etc.
/*
 * run_queue(): retire in-order completions from the ATIO's cmplt_io
 * list.  A completion is "in range" when its offset matches the next
 * acknowledged position for its event type (targ_ack for AIO, init_ack
 * for CTIO); out-of-order entries are left for a later callback.
 * Aborted CTIOs are freed and the ATIO re-queued to the kernel.
 */
721 run_queue(struct ccb_accept_tio *atio)
723 struct atio_descr *a_descr;
724 struct ccb_hdr *ccb_h;
725 int sent_status, event;
730 a_descr = (struct atio_descr *)atio->ccb_h.targ_descr;
732 while ((ccb_h = TAILQ_FIRST(&a_descr->cmplt_io)) != NULL) {
733 struct ccb_scsiio *ctio;
734 struct ctio_descr *c_descr;
736 ctio = (struct ccb_scsiio *)ccb_h;
737 c_descr = (struct ctio_descr *)ctio->ccb_h.targ_descr;
739 if (ctio->ccb_h.status == CAM_REQ_ABORTED) {
740 TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
742 free_ccb((union ccb *)ctio);
743 send_ccb((union ccb *)atio, /*priority*/1);
747 /* If completed item is in range, call handler */
748 if ((c_descr->event == AIO_DONE &&
749 c_descr->offset == a_descr->base_off + a_descr->targ_ack)
750 || (c_descr->event == CTIO_DONE &&
751 c_descr->offset == a_descr->base_off + a_descr->init_ack)) {
/* Capture flags before tcmd_handle() may recycle the CTIO */
752 sent_status = (ccb_h->flags & CAM_SEND_STATUS) != 0;
753 event = c_descr->event;
755 TAILQ_REMOVE(&a_descr->cmplt_io, ccb_h,
757 tcmd_handle(atio, ctio, c_descr->event);
759 /* If entire transfer complete, send back ATIO */
760 if (sent_status != 0 && event == CTIO_DONE)
761 send_ccb((union ccb *)atio, /*priority*/1);
763 /* Gap in offsets so wait until later callback */
765 warnx("IO %p:%p out of order %s", ccb_h,
766 a_descr, c_descr->event == AIO_DONE?
/*
 * work_inot(): handle an immediate-notify CCB.  Bus and device resets
 * post unit-attention conditions via tcmd_ua(); received SCSI messages
 * are logged; the INOT is then removed from the work queue and returned
 * to the kernel for reuse.
 * NOTE(review): several case bodies are elided from this listing.
 */
775 work_inot(struct ccb_immediate_notify *inot)
780 warnx("Working on INOT %p", inot);
782 status = inot->ccb_h.status;
783 status &= CAM_STATUS_MASK;
786 case CAM_SCSI_BUS_RESET:
787 tcmd_ua(CAM_TARGET_WILDCARD, UA_BUS_RESET);
791 tcmd_ua(CAM_TARGET_WILDCARD, UA_BDR);
794 case CAM_MESSAGE_RECV:
796 case MSG_TASK_COMPLETE:
797 case MSG_INITIATOR_DET_ERR:
798 case MSG_ABORT_TASK_SET:
799 case MSG_MESSAGE_REJECT:
801 case MSG_PARITY_ERROR:
802 case MSG_TARGET_RESET:
804 case MSG_CLEAR_TASK_SET:
806 warnx("INOT message %#x", inot->arg);
810 case CAM_REQ_ABORTED:
811 warnx("INOT %p aborted", inot);
814 warnx("Unhandled INOT status %#x", status);
/* Requeue the INOT to the kernel so it can deliver the next event */
819 TAILQ_REMOVE(&work_queue, &inot->ccb_h, periph_links.tqe);
820 send_ccb((union ccb *)inot, /*priority*/1);
/*
 * send_ccb(): submit a CCB to the kernel by writing its pointer to the
 * targ device.  Queued-type CCBs are tracked on pending_queue first so
 * cleanup/abort can find them; on a short write the CCB is marked
 * CAM_PROVIDE_FAIL.
 */
826 send_ccb(union ccb *ccb, int priority)
829 warnx("sending ccb (%#x)", ccb->ccb_h.func_code);
830 ccb->ccb_h.pinfo.priority = priority;
831 if (XPT_FC_IS_QUEUED(ccb)) {
832 TAILQ_INSERT_TAIL(&pending_queue, &ccb->ccb_h,
/* The kernel interface takes the CCB pointer, not the CCB itself */
835 if (write(targ_fd, &ccb, sizeof(ccb)) != sizeof(ccb)) {
837 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
841 /* Return a CTIO/descr/buf combo from the freelist or malloc one */
/*
 * get_ctio(): allocate a CTIO, its descriptor, and a data buffer (up to
 * MAX_CTIOS outstanding), then pre-wire the AIO control block so its
 * completion is delivered to our kqueue with the CTIO as udata.
 * NOTE(review): the function header and error-cleanup lines are elided
 * from this listing.
 */
842 static struct ccb_scsiio *
845 struct ccb_scsiio *ctio;
846 struct ctio_descr *c_descr;
/* Enforce the global cap on outstanding CTIOs */
849 if (num_ctios == MAX_CTIOS) {
850 warnx("at CTIO max");
854 ctio = (struct ccb_scsiio *)malloc(sizeof(*ctio));
859 c_descr = (struct ctio_descr *)malloc(sizeof(*c_descr));
860 if (c_descr == NULL) {
862 warn("malloc ctio_descr");
865 c_descr->buf = malloc(buf_size);
866 if (c_descr->buf == NULL) {
869 warn("malloc backing store");
874 /* Initialize CTIO, CTIO descr, and AIO */
875 ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
876 ctio->ccb_h.retry_count = 2;
877 ctio->ccb_h.timeout = CAM_TIME_INFINITY;
878 ctio->data_ptr = c_descr->buf;
879 ctio->ccb_h.targ_descr = c_descr;
/* AIO completions notify the kqueue; sival_ptr lets request_loop()
 * recover the CTIO from the kevent udata field */
880 c_descr->aiocb.aio_buf = c_descr->buf;
881 c_descr->aiocb.aio_fildes = file_fd;
882 se = &c_descr->aiocb.aio_sigevent;
883 se->sigev_notify = SIGEV_KEVENT;
884 se->sigev_notify_kqueue = kq_fd;
885 se->sigev_value.sival_ptr = ctio;
/*
 * free_ccb(): release a CCB and whatever it owns.  CTIOs also free
 * their data buffer via the descriptor; ATIOs free just the descriptor;
 * INOTs carry no descriptor.  (Freeing of the CCB itself occurs in
 * lines elided from this listing.)
 */
891 free_ccb(union ccb *ccb)
893 switch (ccb->ccb_h.func_code) {
894 case XPT_CONT_TARGET_IO:
896 struct ctio_descr *c_descr;
898 c_descr = (struct ctio_descr *)ccb->ccb_h.targ_descr;
903 case XPT_ACCEPT_TARGET_IO:
904 free(ccb->ccb_h.targ_descr);
906 case XPT_IMMEDIATE_NOTIFY:
/*
 * get_sim_flags(): issue an XPT_PATH_INQ to discover HBA capabilities.
 * Fails unless the controller advertises target-mode support
 * (PIT_PROCESSOR); on success, stores the inquiry capability bits into
 * *flags for use when building inquiry data.
 */
914 get_sim_flags(u_int16_t *flags)
916 struct ccb_pathinq cpi;
919 /* Find SIM capabilities */
920 bzero(&cpi, sizeof(cpi));
921 cpi.ccb_h.func_code = XPT_PATH_INQ;
922 send_ccb((union ccb *)&cpi, /*priority*/1);
923 status = cpi.ccb_h.status & CAM_STATUS_MASK;
924 if (status != CAM_REQ_CMP) {
925 fprintf(stderr, "CPI failed, status %#x\n", status);
929 /* Can only enable on controllers that support target mode */
930 if ((cpi.target_sprt & PIT_PROCESSOR) == 0) {
931 fprintf(stderr, "HBA does not support target mode\n");
932 status = CAM_PATH_INVALID;
936 *flags = cpi.hba_inquiry;
/*
 * rel_simq(): release a frozen SIM queue by sending XPT_REL_SIMQ with
 * RELSIM_RELEASE_AFTER_QEMPTY and no timeout.
 * NOTE(review): the function header line is not visible in this listing.
 */
943 struct ccb_relsim crs;
945 bzero(&crs, sizeof(crs));
946 crs.ccb_h.func_code = XPT_REL_SIMQ;
947 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
949 crs.release_timeout = 0;
951 send_ccb((union ccb *)&crs, /*priority*/0);
954 /* Cancel all pending CCBs. */
/*
 * abort_all_pending(): walk pending_queue and issue an XPT_ABORT for
 * each CCB still outstanding in the kernel, warning on any abort that
 * does not complete with CAM_REQ_CMP.
 */
958 struct ccb_abort cab;
959 struct ccb_hdr *ccb_h;
962 warnx("abort_all_pending");
964 bzero(&cab, sizeof(cab));
965 cab.ccb_h.func_code = XPT_ABORT;
966 TAILQ_FOREACH(ccb_h, &pending_queue, periph_links.tqe) {
968 warnx("Aborting pending CCB %p\n", ccb_h);
969 cab.abort_ccb = (union ccb *)ccb_h;
970 send_ccb((union ccb *)&cab, /*priority*/1);
971 if (cab.ccb_h.status != CAM_REQ_CMP) {
972 warnx("Unable to abort CCB, status %#x\n",
/*
 * usage(): print the command-line synopsis (the surrounding fprintf and
 * exit lines are elided from this listing).
 */
982 "Usage: scsi_target [-AdSTY] [-b bufsize] [-c sectorsize]\n"
983 "\t\t[-r numbufs] [-s volsize] [-W 8,16,32]\n"
984 "\t\tbus:target:lun filename\n"