1 /*-
2  * Copyright (c) 1997-2007 Kenneth D. Merry
3  * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions, and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    substantially similar to the "NO WARRANTY" disclaimer below
14  *    ("Disclaimer") and any redistribution must be conditioned upon
15  *    including a substantially similar Disclaimer requirement for further
16  *    binary redistribution.
17  *
18  * NO WARRANTY
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
22  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
27  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
28  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGES.
30  *
31  * Authors: Ken Merry           (Spectra Logic Corporation)
32  */
33
34 /*
35  * This is eventually intended to be:
36  * - A basic data transfer/copy utility
37  * - A simple benchmark utility
38  * - An example of how to use the asynchronous pass(4) driver interface.
39  */
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include <sys/ioctl.h>
44 #include <sys/stdint.h>
45 #include <sys/types.h>
46 #include <sys/endian.h>
47 #include <sys/param.h>
48 #include <sys/sbuf.h>
49 #include <sys/stat.h>
50 #include <sys/event.h>
51 #include <sys/time.h>
52 #include <sys/uio.h>
53 #include <vm/vm.h>
54 #include <machine/bus.h>
55 #include <sys/bus.h>
56 #include <sys/bus_dma.h>
57 #include <sys/mtio.h>
58 #include <sys/conf.h>
59 #include <sys/disk.h>
60
61 #include <stdio.h>
62 #include <stdlib.h>
63 #include <semaphore.h>
64 #include <string.h>
65 #include <unistd.h>
66 #include <inttypes.h>
67 #include <limits.h>
68 #include <fcntl.h>
69 #include <ctype.h>
70 #include <err.h>
71 #include <libutil.h>
72 #include <pthread.h>
73 #include <assert.h>
74 #include <bsdxml.h>
75
76 #include <cam/cam.h>
77 #include <cam/cam_debug.h>
78 #include <cam/cam_ccb.h>
79 #include <cam/scsi/scsi_all.h>
80 #include <cam/scsi/scsi_da.h>
81 #include <cam/scsi/scsi_pass.h>
82 #include <cam/scsi/scsi_message.h>
83 #include <cam/scsi/smp_all.h>
84 #include <camlib.h>
85 #include <mtlib.h>
86 #include <zlib.h>
87
88 typedef enum {
89         CAMDD_CMD_NONE          = 0x00000000,
90         CAMDD_CMD_HELP          = 0x00000001,
91         CAMDD_CMD_WRITE         = 0x00000002,
92         CAMDD_CMD_READ          = 0x00000003
93 } camdd_cmdmask;
94
95 typedef enum {
96         CAMDD_ARG_NONE          = 0x00000000,
97         CAMDD_ARG_VERBOSE       = 0x00000001,
98         CAMDD_ARG_DEVICE        = 0x00000002,
99         CAMDD_ARG_BUS           = 0x00000004,
100         CAMDD_ARG_TARGET        = 0x00000008,
101         CAMDD_ARG_LUN           = 0x00000010,
102         CAMDD_ARG_UNIT          = 0x00000020,
103         CAMDD_ARG_TIMEOUT       = 0x00000040,
104         CAMDD_ARG_ERR_RECOVER   = 0x00000080,
105         CAMDD_ARG_RETRIES       = 0x00000100
106 } camdd_argmask;
107
108 typedef enum {
109         CAMDD_DEV_NONE          = 0x00,
110         CAMDD_DEV_PASS          = 0x01,
111         CAMDD_DEV_FILE          = 0x02
112 } camdd_dev_type;
113
114 struct camdd_io_opts {
115         camdd_dev_type  dev_type;
116         char            *dev_name;
117         uint64_t        blocksize;
118         uint64_t        queue_depth;
119         uint64_t        offset;
120         int             min_cmd_size;
121         int             write_dev;
122         uint64_t        debug;
123 };
124
125 typedef enum {
126         CAMDD_BUF_NONE,
127         CAMDD_BUF_DATA,
128         CAMDD_BUF_INDIRECT
129 } camdd_buf_type;
130
131 struct camdd_buf_indirect {
132         /*
133          * Pointer to the source buffer.
134          */
135         struct camdd_buf *src_buf;
136
137         /*
138          * Offset into the source buffer, in bytes.
139          */
140         uint64_t          offset;
141         /*
142          * Pointer to the starting point in the source buffer.
143          */
144         uint8_t          *start_ptr;
145
146         /*
147          * Length of this chunk in bytes.
148          */
149         size_t            len;
150 };
151
152 struct camdd_buf_data {
153         /*
154          * Buffer allocated when we allocate this camdd_buf.  This should
155          * be the size of the blocksize for this device.
156          */
157         uint8_t                 *buf;
158
159         /*
160          * The amount of backing store allocated in buf.  Generally this
161          * will be the blocksize of the device.
162          */
163         uint32_t                 alloc_len;
164
165         /*
166          * The amount of data that was put into the buffer (on reads) or
167          * the amount of data we have put onto the src_list so far (on
168          * writes).
169          */
170         uint32_t                 fill_len;
171
172         /*
173          * The amount of data that was not transferred.
174          */
175         uint32_t                 resid;
176
177         /*
178          * Starting byte offset on the reader.
179          */
180         uint64_t                 src_start_offset;
181         
182         /*
183          * CCB used for pass(4) device targets.
184          */
185         union ccb                ccb;
186
187         /*
188          * Number of scatter/gather segments.
189          */
190         int                      sg_count;
191
192         /*
193          * Set if we had to tack on an extra buffer to round the transfer
194          * up to a sector size.
195          */
196         int                      extra_buf;
197
198         /*
199          * Scatter/gather list used generally when we're the writer for a
200          * pass(4) device. 
201          */
202         bus_dma_segment_t       *segs;
203
204         /*
205          * Scatter/gather list used generally when we're the writer for a
206          * file or block device.
207          */
208         struct iovec            *iovec;
209 };
210
211 union camdd_buf_types {
212         struct camdd_buf_indirect       indirect;
213         struct camdd_buf_data           data;
214 };
215
216 typedef enum {
217         CAMDD_STATUS_NONE,
218         CAMDD_STATUS_OK,
219         CAMDD_STATUS_SHORT_IO,
220         CAMDD_STATUS_EOF,
221         CAMDD_STATUS_ERROR
222 } camdd_buf_status;
223
224 struct camdd_buf {
225         camdd_buf_type           buf_type;
226         union camdd_buf_types    buf_type_spec;
227
228         camdd_buf_status         status;
229
230         uint64_t                 lba;
231         size_t                   len;
232
233         /*
234          * A reference count of how many indirect buffers point to this
235          * buffer.
236          */
237         int                      refcount;
238
239         /*
240          * A link back to our parent device.
241          */
242         struct camdd_dev        *dev;
243         STAILQ_ENTRY(camdd_buf)  links;
244         STAILQ_ENTRY(camdd_buf)  work_links;
245
246         /*
247          * A count of the buffers on the src_list.
248          */
249         int                      src_count;
250
251         /*
252          * List of buffers from our partner thread that are the components
253          * of this buffer for the I/O.  Uses src_links.
254          */
255         STAILQ_HEAD(,camdd_buf)  src_list;
256         STAILQ_ENTRY(camdd_buf)  src_links;
257 };
258
259 #define NUM_DEV_TYPES   2
260
261 struct camdd_dev_pass {
262         int                      scsi_dev_type;
263         struct cam_device       *dev;
264         uint64_t                 max_sector;
265         uint32_t                 block_len;
266         uint32_t                 cpi_maxio;
267 };
268
269 typedef enum {
270         CAMDD_FILE_NONE,
271         CAMDD_FILE_REG,
272         CAMDD_FILE_STD,
273         CAMDD_FILE_PIPE,
274         CAMDD_FILE_DISK,
275         CAMDD_FILE_TAPE,
276         CAMDD_FILE_TTY,
277         CAMDD_FILE_MEM
278 } camdd_file_type;
279
280 typedef enum {
281         CAMDD_FF_NONE           = 0x00,
282         CAMDD_FF_CAN_SEEK       = 0x01
283 } camdd_file_flags;
284
285 struct camdd_dev_file {
286         int                      fd;
287         struct stat              sb;
288         char                     filename[MAXPATHLEN + 1];
289         camdd_file_type          file_type;
290         camdd_file_flags         file_flags;
291         uint8_t                 *tmp_buf;
292 };
293
294 struct camdd_dev_block {
295         int                      fd;
296         uint64_t                 size_bytes;
297         uint32_t                 block_len;
298 };
299
300 union camdd_dev_spec {
301         struct camdd_dev_pass   pass;
302         struct camdd_dev_file   file;
303         struct camdd_dev_block  block;
304 };
305
306 typedef enum {
307         CAMDD_DEV_FLAG_NONE             = 0x00,
308         CAMDD_DEV_FLAG_EOF              = 0x01,
309         CAMDD_DEV_FLAG_PEER_EOF         = 0x02,
310         CAMDD_DEV_FLAG_ACTIVE           = 0x04,
311         CAMDD_DEV_FLAG_EOF_SENT         = 0x08,
312         CAMDD_DEV_FLAG_EOF_QUEUED       = 0x10
313 } camdd_dev_flags;
314
315 struct camdd_dev {
316         camdd_dev_type           dev_type;
317         union camdd_dev_spec     dev_spec;
318         camdd_dev_flags          flags;
319         char                     device_name[MAXPATHLEN+1];
320         uint32_t                 blocksize;
321         uint32_t                 sector_size;
322         uint64_t                 max_sector;
323         uint64_t                 sector_io_limit;
324         int                      min_cmd_size;
325         int                      write_dev;
326         int                      retry_count;
327         int                      io_timeout;
328         int                      debug;
329         uint64_t                 start_offset_bytes;
330         uint64_t                 next_io_pos_bytes;
331         uint64_t                 next_peer_pos_bytes;
332         uint64_t                 next_completion_pos_bytes;
333         uint64_t                 peer_bytes_queued;
334         uint64_t                 bytes_transferred;
335         uint32_t                 target_queue_depth;
336         uint32_t                 cur_active_io;
337         uint8_t                 *extra_buf;
338         uint32_t                 extra_buf_len;
339         struct camdd_dev        *peer_dev;
340         pthread_mutex_t          mutex;
341         pthread_cond_t           cond;
342         int                      kq;
343
344         int                      (*run)(struct camdd_dev *dev);
345         int                      (*fetch)(struct camdd_dev *dev);
346
347         /*
348          * Buffers that are available for I/O.  Uses links.
349          */
350         STAILQ_HEAD(,camdd_buf)  free_queue;
351
352         /*
353          * Free indirect buffers.  These are used for breaking a large
354          * buffer into multiple pieces.
355          */
356         STAILQ_HEAD(,camdd_buf)  free_indirect_queue;
357
358         /*
359          * Buffers that have been queued to the kernel.  Uses links.
360          */
361         STAILQ_HEAD(,camdd_buf)  active_queue;
362
363         /*
364          * Will generally contain one of our buffers that is waiting for enough
365          * I/O from our partner thread to be able to execute.  This will
366          * generally happen when our per-I/O-size is larger than the
367          * partner thread's per-I/O-size.  Uses links.
368          */
369         STAILQ_HEAD(,camdd_buf)  pending_queue;
370
371         /*
372          * Number of buffers on the pending queue
373          */
374         int                      num_pending_queue;
375
376         /*
377          * Buffers that are filled and ready to execute.  This is used when
378          * our partner (reader) thread sends us blocks that are larger than
379          * our blocksize, and so we have to split them into multiple pieces.
380          */
381         STAILQ_HEAD(,camdd_buf)  run_queue;
382
383         /*
384          * Number of buffers on the run queue.
385          */
386         int                      num_run_queue;
387
388         STAILQ_HEAD(,camdd_buf)  reorder_queue;
389
390         int                      num_reorder_queue;
391
392         /*
393          * Buffers that have been queued to us by our partner thread
394          * (generally the reader thread) to be written out.  Uses
395          * work_links.
396          */
397         STAILQ_HEAD(,camdd_buf)  work_queue;
398
399         /*
400          * Buffers that have been completed by our partner thread.  Uses
401          * work_links.
402          */
403         STAILQ_HEAD(,camdd_buf)  peer_done_queue;
404
405         /*
406          * Number of buffers on the peer done queue.
407          */
408         uint32_t                 num_peer_done_queue;
409
410         /*
411          * A list of buffers that we have queued to our peer thread.  Uses
412          * links.
413          */
414         STAILQ_HEAD(,camdd_buf)  peer_work_queue;
415
416         /*
417          * Number of buffers on the peer work queue.
418          */
419         uint32_t                 num_peer_work_queue;
420 };
421
422 static sem_t camdd_sem;
423 static int need_exit = 0;
424 static int error_exit = 0;
425 static int need_status = 0;
426
427 #ifndef min
428 #define min(a, b) (((a) < (b)) ? (a) : (b))
429 #endif
430
431 /*
432  * XXX KDM private copy of timespecsub().  This is normally defined in
433  * sys/time.h, but is only enabled in the kernel.  If that definition is
434  * enabled in userland, it breaks the build of libnetbsd.
435  */
436 #ifndef timespecsub
437 #define timespecsub(vvp, uvp)                                           \
438         do {                                                            \
439                 (vvp)->tv_sec -= (uvp)->tv_sec;                         \
440                 (vvp)->tv_nsec -= (uvp)->tv_nsec;                       \
441                 if ((vvp)->tv_nsec < 0) {                               \
442                         (vvp)->tv_sec--;                                \
443                         (vvp)->tv_nsec += 1000000000;                   \
444                 }                                                       \
445         } while (0)
446 #endif
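/*
 * Illustrative use of the macro above (added, not part of the original
 * source; variable names are placeholders): to measure elapsed time, copy
 * the end time and subtract the start time in place, e.g.:
 *
 *	struct timespec start_ts, end_ts, elapsed;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &start_ts);
 *	... do work ...
 *	clock_gettime(CLOCK_MONOTONIC, &end_ts);
 *	elapsed = end_ts;
 *	timespecsub(&elapsed, &start_ts);
 *
 * Afterwards "elapsed" holds end_ts - start_ts, with tv_nsec normalized
 * into the range [0, 999999999].
 */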
447
448
449 /* Generically useful offsets into the peripheral private area */
450 #define ppriv_ptr0 periph_priv.entries[0].ptr
451 #define ppriv_ptr1 periph_priv.entries[1].ptr
452 #define ppriv_field0 periph_priv.entries[0].field
453 #define ppriv_field1 periph_priv.entries[1].field
454
455 #define ccb_buf ppriv_ptr0
456
457 #define CAMDD_FILE_DEFAULT_BLOCK        524288
458 #define CAMDD_FILE_DEFAULT_DEPTH        1
459 #define CAMDD_PASS_MAX_BLOCK            1048576
460 #define CAMDD_PASS_DEFAULT_DEPTH        6
461 #define CAMDD_PASS_RW_TIMEOUT           (60 * 1000)
462
463 static int parse_btl(char *tstr, int *bus, int *target, int *lun,
464                      camdd_argmask *arglst);
465 void camdd_free_dev(struct camdd_dev *dev);
466 struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type,
467                                   struct kevent *new_ke, int num_ke,
468                                   int retry_count, int timeout);
469 static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev,
470                                          camdd_buf_type buf_type);
471 void camdd_release_buf(struct camdd_buf *buf);
472 struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type);
473 int camdd_buf_sg_create(struct camdd_buf *buf, int iovec,
474                         uint32_t sector_size, uint32_t *num_sectors_used,
475                         int *double_buf_needed);
476 uint32_t camdd_buf_get_len(struct camdd_buf *buf);
477 void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf);
478 int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize,
479                      uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran);
480 struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts,
481                                    int retry_count, int timeout);
482 struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev,
483                                    struct camdd_io_opts *io_opts,
484                                    camdd_argmask arglist, int probe_retry_count,
485                                    int probe_timeout, int io_retry_count,
486                                    int io_timeout);
487 void *camdd_file_worker(void *arg);
488 camdd_buf_status camdd_ccb_status(union ccb *ccb);
489 int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf);
490 int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf);
491 void camdd_peer_done(struct camdd_buf *buf);
492 void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf,
493                         int *error_count);
494 int camdd_pass_fetch(struct camdd_dev *dev);
495 int camdd_file_run(struct camdd_dev *dev);
496 int camdd_pass_run(struct camdd_dev *dev);
497 int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len);
498 int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf);
499 void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth,
500                      uint32_t *peer_depth, uint32_t *our_bytes,
501                      uint32_t *peer_bytes);
502 void *camdd_worker(void *arg);
503 void camdd_sig_handler(int sig);
504 void camdd_print_status(struct camdd_dev *camdd_dev,
505                         struct camdd_dev *other_dev,
506                         struct timespec *start_time);
507 int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts,
508              uint64_t max_io, int retry_count, int timeout);
509 int camdd_parse_io_opts(char *args, int is_write,
510                         struct camdd_io_opts *io_opts);
511 void usage(void);
512
513 /*
514  * Parse out a bus, or a bus, target and lun in the following
515  * format:
516  * bus
517  * bus:target
518  * bus:target:lun
519  *
520  * Returns the number of parsed components, or 0.
521  */
522 static int
523 parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst)
524 {
525         char *tmpstr;
526         int convs = 0;
527
528         while (isspace(*tstr) && (*tstr != '\0'))
529                 tstr++;
530
531         tmpstr = (char *)strtok(tstr, ":");
532         if ((tmpstr != NULL) && (*tmpstr != '\0')) {
533                 *bus = strtol(tmpstr, NULL, 0);
534                 *arglst |= CAMDD_ARG_BUS;
535                 convs++;
536                 tmpstr = (char *)strtok(NULL, ":");
537                 if ((tmpstr != NULL) && (*tmpstr != '\0')) {
538                         *target = strtol(tmpstr, NULL, 0);
539                         *arglst |= CAMDD_ARG_TARGET;
540                         convs++;
541                         tmpstr = (char *)strtok(NULL, ":");
542                         if ((tmpstr != NULL) && (*tmpstr != '\0')) {
543                                 *lun = strtol(tmpstr, NULL, 0);
544                                 *arglst |= CAMDD_ARG_LUN;
545                                 convs++;
546                         }
547                 }
548         }
549
550         return convs;
551 }
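/*
 * Illustrative example (added, not in the original source): parsing a
 * writable string containing "0:1:2" with parse_btl() sets bus = 0,
 * target = 1 and lun = 2, ORs CAMDD_ARG_BUS, CAMDD_ARG_TARGET and
 * CAMDD_ARG_LUN into *arglst, and returns 3.  A string containing just
 * "0" sets only the bus and returns 1.
 */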
552
553 /*
554  * XXX KDM clean up and free all of the buffers on the queue!
555  */
556 void
557 camdd_free_dev(struct camdd_dev *dev)
558 {
559         if (dev == NULL)
560                 return;
561
562         switch (dev->dev_type) {
563         case CAMDD_DEV_FILE: {
564                 struct camdd_dev_file *file_dev = &dev->dev_spec.file;
565
566                 if (file_dev->fd != -1)
567                         close(file_dev->fd);
568                 free(file_dev->tmp_buf);
569                 break;
570         }
571         case CAMDD_DEV_PASS: {
572                 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
573
574                 if (pass_dev->dev != NULL)
575                         cam_close_device(pass_dev->dev);
576                 break;
577         }
578         default:
579                 break;
580         }
581
582         free(dev);
583 }
584
585 struct camdd_dev *
586 camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke,
587                 int retry_count, int timeout)
588 {
589         struct camdd_dev *dev = NULL;
590         struct kevent *ke;
591         size_t ke_size;
592         int retval = 0;
593
594         dev = malloc(sizeof(*dev));
595         if (dev == NULL) {
596                 warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev));
597                 goto bailout;
598         }
599
600         bzero(dev, sizeof(*dev));
601
602         dev->dev_type = dev_type;
603         dev->io_timeout = timeout;
604         dev->retry_count = retry_count;
605         STAILQ_INIT(&dev->free_queue);
606         STAILQ_INIT(&dev->free_indirect_queue);
607         STAILQ_INIT(&dev->active_queue);
608         STAILQ_INIT(&dev->pending_queue);
609         STAILQ_INIT(&dev->run_queue);
610         STAILQ_INIT(&dev->reorder_queue);
611         STAILQ_INIT(&dev->work_queue);
612         STAILQ_INIT(&dev->peer_done_queue);
613         STAILQ_INIT(&dev->peer_work_queue);
614         retval = pthread_mutex_init(&dev->mutex, NULL);
615         if (retval != 0) {
616                 warnc(retval, "%s: failed to initialize mutex", __func__);
617                 goto bailout;
618         }
619
620         retval = pthread_cond_init(&dev->cond, NULL);
621         if (retval != 0) {
622                 warnc(retval, "%s: failed to initialize condition variable",
623                       __func__);
624                 goto bailout;
625         }
626
627         dev->kq = kqueue();
628         if (dev->kq == -1) {
629                 warn("%s: Unable to create kqueue", __func__);
630                 goto bailout;
631         }
632
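        /*
         * Added note: the "num_ke + 4" below reserves room for the four
         * kevents registered later in this function: two EVFILT_USER
         * entries keyed on the addresses of the work and peer-done
         * queues (triggered to wake this thread), plus EVFILT_SIGNAL
         * entries for SIGINFO and SIGINT.
         */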
633         ke_size = sizeof(struct kevent) * (num_ke + 4);
634         ke = malloc(ke_size);
635         if (ke == NULL) {
636                 warn("%s: unable to malloc %zu bytes", __func__, ke_size);
637                 goto bailout;
638         }
639         bzero(ke, ke_size);
640         if (num_ke > 0)
641                 bcopy(new_ke, ke, num_ke * sizeof(struct kevent));
642
643         EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER,
644                EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0);
645         EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER,
646                EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0);
647         EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0);
648         EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0);
649
650         retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL);
651         if (retval == -1) {
652                 warn("%s: Unable to register kevents", __func__);
653                 goto bailout;
654         }
655
656
657         return (dev);
658
659 bailout:
660         free(dev);
661
662         return (NULL);
663 }
664
665 static struct camdd_buf *
666 camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type)
667 {
668         struct camdd_buf *buf = NULL;
669         uint8_t *data_ptr = NULL;
670
671         /*
672          * We only need to allocate data space for data buffers.
673          */
674         switch (buf_type) {
675         case CAMDD_BUF_DATA:
676                 data_ptr = malloc(dev->blocksize);
677                 if (data_ptr == NULL) {
678                         warn("unable to allocate %u bytes", dev->blocksize);
679                         goto bailout_error;
680                 }
681                 break;
682         default:
683                 break;
684         }
685         
686         buf = malloc(sizeof(*buf));
687         if (buf == NULL) {
688                 warn("unable to allocate %zu bytes", sizeof(*buf));
689                 goto bailout_error;
690         }
691
692         bzero(buf, sizeof(*buf));
693         buf->buf_type = buf_type;
694         buf->dev = dev;
695         switch (buf_type) {
696         case CAMDD_BUF_DATA: {
697                 struct camdd_buf_data *data;
698
699                 data = &buf->buf_type_spec.data;
700
701                 data->alloc_len = dev->blocksize;
702                 data->buf = data_ptr;
703                 break;
704         }
705         case CAMDD_BUF_INDIRECT:
706                 break;
707         default:
708                 break;
709         }
710         STAILQ_INIT(&buf->src_list);
711
712         return (buf);
713
714 bailout_error:
715         if (data_ptr != NULL)
716                 free(data_ptr);
717
718         if (buf != NULL)
719                 free(buf);
720
721         return (NULL);
722 }
723
724 void
725 camdd_release_buf(struct camdd_buf *buf)
726 {
727         struct camdd_dev *dev;
728
729         dev = buf->dev;
730
731         switch (buf->buf_type) {
732         case CAMDD_BUF_DATA: {
733                 struct camdd_buf_data *data;
734
735                 data = &buf->buf_type_spec.data;
736
737                 if (data->segs != NULL) {
738                         if (data->extra_buf != 0) {
739                                 void *extra_buf;
740
741                                 extra_buf = (void *)
742                                     data->segs[data->sg_count - 1].ds_addr;
743                                 free(extra_buf);
744                                 data->extra_buf = 0;
745                         }
746                         free(data->segs);
747                         data->segs = NULL;
748                         data->sg_count = 0;
749                 } else if (data->iovec != NULL) {
750                         if (data->extra_buf != 0) {
751                                 free(data->iovec[data->sg_count - 1].iov_base);
752                                 data->extra_buf = 0;
753                         }
754                         free(data->iovec);
755                         data->iovec = NULL;
756                         data->sg_count = 0;
757                 }
758                 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
759                 break;
760         }
761         case CAMDD_BUF_INDIRECT:
762                 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links);
763                 break;
764         default:
765                 err(1, "%s: Invalid buffer type %d for released buffer",
766                     __func__, buf->buf_type);
767                 break;
768         }
769 }
770
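/*
 * Added note: camdd_release_buf() above and camdd_get_buf() below together
 * form a simple per-device free list.  Released buffers are parked on
 * free_queue (or free_indirect_queue for indirect buffers) and recycled
 * here; camdd_alloc_buf() is only called when the matching free list is
 * empty.
 */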
771 struct camdd_buf *
772 camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type)
773 {
774         struct camdd_buf *buf = NULL;
775
776         switch (buf_type) {
777         case CAMDD_BUF_DATA:
778                 buf = STAILQ_FIRST(&dev->free_queue);
779                 if (buf != NULL) {
780                         struct camdd_buf_data *data;
781                         uint8_t *data_ptr;
782                         uint32_t alloc_len;
783
784                         STAILQ_REMOVE_HEAD(&dev->free_queue, links);
785                         data = &buf->buf_type_spec.data;
786                         data_ptr = data->buf;
787                         alloc_len = data->alloc_len;
788                         bzero(buf, sizeof(*buf));
789                         data->buf = data_ptr;
790                         data->alloc_len = alloc_len;
791                 }
792                 break;
793         case CAMDD_BUF_INDIRECT:
794                 buf = STAILQ_FIRST(&dev->free_indirect_queue);
795                 if (buf != NULL) {
796                         STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links);
797
798                         bzero(buf, sizeof(*buf));
799                 }
800                 break;
801         default:
802                 warnx("Unknown buffer type %d requested", buf_type);
803                 break;
804         }
805
806
807         if (buf == NULL)
808                 return (camdd_alloc_buf(dev, buf_type));
809         else {
810                 STAILQ_INIT(&buf->src_list);
811                 buf->dev = dev;
812                 buf->buf_type = buf_type;
813
814                 return (buf);
815         }
816 }
817
818 int
819 camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size,
820                     uint32_t *num_sectors_used, int *double_buf_needed)
821 {
822         struct camdd_buf *tmp_buf;
823         struct camdd_buf_data *data;
824         uint8_t *extra_buf = NULL;
825         size_t extra_buf_len = 0;
826         int i, retval = 0;
827
828         data = &buf->buf_type_spec.data;
829
830         data->sg_count = buf->src_count;
831         /*
832          * Compose a scatter/gather list from all of the buffers in the list.
833          * If the length of the buffer isn't a multiple of the sector size,
834          * we'll have to add an extra buffer.  This should only happen
835          * at the end of a transfer.
836          */
837         if ((data->fill_len % sector_size) != 0) {
838                 extra_buf_len = sector_size - (data->fill_len % sector_size);
839                 extra_buf = calloc(extra_buf_len, 1);
840                 if (extra_buf == NULL) {
841                         warn("%s: unable to allocate %zu bytes for extra "
842                             "buffer space", __func__, extra_buf_len);
843                         retval = 1;
844                         goto bailout;
845                 }
846                 data->extra_buf = 1;
847                 data->sg_count++;
848         }
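        /*
         * Worked example (added for clarity): with fill_len = 1000 and a
         * sector_size of 512, fill_len % sector_size is 488, so
         * extra_buf_len = 512 - 488 = 24.  The zeroed 24-byte buffer is
         * appended as one extra S/G element below, padding the transfer
         * out to exactly two 512-byte sectors.
         */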
849         if (iovec == 0) {
850                 data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t));
851                 if (data->segs == NULL) {
852                         warn("%s: unable to allocate %zu bytes for S/G list",
853                             __func__, sizeof(bus_dma_segment_t) *
854                             data->sg_count);
855                         retval = 1;
856                         goto bailout;
857                 }
858
859         } else {
860                 data->iovec = calloc(data->sg_count, sizeof(struct iovec));
861                 if (data->iovec == NULL) {
862                         warn("%s: unable to allocate %zu bytes for S/G list",
863                             __func__, sizeof(struct iovec) * data->sg_count);
864                         retval = 1;
865                         goto bailout;
866                 }
867         }
868
869         for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list);
870              i < buf->src_count && tmp_buf != NULL; i++,
871              tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) {
872
873                 if (tmp_buf->buf_type == CAMDD_BUF_DATA) {
874                         struct camdd_buf_data *tmp_data;
875
876                         tmp_data = &tmp_buf->buf_type_spec.data;
877                         if (iovec == 0) {
878                                 data->segs[i].ds_addr =
879                                     (bus_addr_t) tmp_data->buf;
880                                 data->segs[i].ds_len = tmp_data->fill_len -
881                                     tmp_data->resid;
882                         } else {
883                                 data->iovec[i].iov_base = tmp_data->buf;
884                                 data->iovec[i].iov_len = tmp_data->fill_len -
885                                     tmp_data->resid;
886                         }
887                         if (((tmp_data->fill_len - tmp_data->resid) %
888                              sector_size) != 0)
889                                 *double_buf_needed = 1;
890                 } else {
891                         struct camdd_buf_indirect *tmp_ind;
892
893                         tmp_ind = &tmp_buf->buf_type_spec.indirect;
894                         if (iovec == 0) {
895                                 data->segs[i].ds_addr =
896                                     (bus_addr_t)tmp_ind->start_ptr;
897                                 data->segs[i].ds_len = tmp_ind->len;
898                         } else {
899                                 data->iovec[i].iov_base = tmp_ind->start_ptr;
900                                 data->iovec[i].iov_len = tmp_ind->len;
901                         }
902                         if ((tmp_ind->len % sector_size) != 0)
903                                 *double_buf_needed = 1;
904                 }
905         }
906
907         if (extra_buf != NULL) {
908                 if (iovec == 0) {
909                         data->segs[i].ds_addr = (bus_addr_t)extra_buf;
910                         data->segs[i].ds_len = extra_buf_len;
911                 } else {
912                         data->iovec[i].iov_base = extra_buf;
913                         data->iovec[i].iov_len = extra_buf_len;
914                 }
915                 i++;
916         }
917         if ((tmp_buf != NULL) || (i != data->sg_count)) {
918                 warnx("buffer source count does not match "
919                       "number of buffers in list!");
920                 retval = 1;
921                 goto bailout;
922         }
923
924 bailout:
925         if (retval == 0) {
926                 *num_sectors_used = (data->fill_len + extra_buf_len) /
927                     sector_size;
928         }
929         return (retval);
930 }
931
932 uint32_t
933 camdd_buf_get_len(struct camdd_buf *buf)
934 {
935         uint32_t len = 0;
936
937         if (buf->buf_type != CAMDD_BUF_DATA) {
938                 struct camdd_buf_indirect *indirect;
939
940                 indirect = &buf->buf_type_spec.indirect;
941                 len = indirect->len;
942         } else {
943                 struct camdd_buf_data *data;
944
945                 data = &buf->buf_type_spec.data;
946                 len = data->fill_len;
947         }
948
949         return (len);
950 }
951
952 void
953 camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf)
954 {
955         struct camdd_buf_data *data;
956
957         assert(buf->buf_type == CAMDD_BUF_DATA);
958
959         data = &buf->buf_type_spec.data;
960
961         STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links);
962         buf->src_count++;
963
964         data->fill_len += camdd_buf_get_len(child_buf);
965 }
966
967 typedef enum {
968         CAMDD_TS_MAX_BLK,
969         CAMDD_TS_MIN_BLK,
970         CAMDD_TS_BLK_GRAN,
971         CAMDD_TS_EFF_IOSIZE
972 } camdd_status_item_index;
973
974 static struct camdd_status_items {
975         const char *name;
976         struct mt_status_entry *entry;
977 } req_status_items[] = {
978         { "max_blk", NULL },
979         { "min_blk", NULL },
980         { "blk_gran", NULL },
981         { "max_effective_iosize", NULL }
982 };
983
984 int
985 camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize,
986                  uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran)
987 {
988         struct mt_status_data status_data;
989         char *xml_str = NULL;
990         unsigned int i;
991         int retval = 0;
992         
993         retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str);
994         if (retval != 0)
995                 err(1, "Couldn't get XML string from %s", filename);
996
997         retval = mt_get_status(xml_str, &status_data);
998         if (retval != XML_STATUS_OK) {
999                 warn("couldn't get status for %s", filename);
1000                 retval = 1;
1001                 goto bailout;
1002         } else
1003                 retval = 0;
1004
1005         if (status_data.error != 0) {
1006                 warnx("%s", status_data.error_str);
1007                 retval = 1;
1008                 goto bailout;
1009         }
1010
1011         for (i = 0; i < sizeof(req_status_items) /
1012              sizeof(req_status_items[0]); i++) {
1013                 char *name;
1014
1015                 name = __DECONST(char *, req_status_items[i].name);
1016                 req_status_items[i].entry = mt_status_entry_find(&status_data,
1017                     name);
1018                 if (req_status_items[i].entry == NULL) {
1019                         errx(1, "Cannot find status entry %s",
1020                             req_status_items[i].name);
1021                 }
1022         }
1023
1024         *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned;
1025         *max_blk = req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned;
1026         *min_blk = req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned;
1027         *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned;
1028 bailout:
1029
1030         free(xml_str);
1031         mt_status_free(&status_data);
1032
1033         return (retval);
1034 }
1035
1036 struct camdd_dev *
1037 camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count,
1038     int timeout)
1039 {
1040         struct camdd_dev *dev = NULL;
1041         struct camdd_dev_file *file_dev;
1042         uint64_t blocksize = io_opts->blocksize;
1043
1044         dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout);
1045         if (dev == NULL)
1046                 goto bailout;
1047
1048         file_dev = &dev->dev_spec.file;
1049         file_dev->fd = fd;
1050         strlcpy(file_dev->filename, io_opts->dev_name,
1051             sizeof(file_dev->filename));
1052         strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name));
1053         if (blocksize == 0)
1054                 dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK;
1055         else
1056                 dev->blocksize = blocksize;
1057
1058         if ((io_opts->queue_depth != 0)
1059          && (io_opts->queue_depth != 1)) {
1060                 warnx("Queue depth %ju for %s ignored, only 1 outstanding "
1061                     "command supported", (uintmax_t)io_opts->queue_depth,
1062                     io_opts->dev_name);
1063         }
1064         dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH;
1065         dev->run = camdd_file_run;
1066         dev->fetch = NULL;
1067
1068         /*
1069          * We can effectively access files on byte boundaries.  We'll reset
1070          * this for devices like disks that can be accessed on sector
1071          * boundaries.
1072          */
1073         dev->sector_size = 1;
1074
1075         if ((fd != STDIN_FILENO)
1076          && (fd != STDOUT_FILENO)) {
1077                 int retval;
1078
1079                 retval = fstat(fd, &file_dev->sb);
1080                 if (retval != 0) {
1081                         warn("Cannot stat %s", dev->device_name);
1082                         goto bailout_error;
1083                 }
1084                 if (S_ISREG(file_dev->sb.st_mode)) {
1085                         file_dev->file_type = CAMDD_FILE_REG;
1086                 } else if (S_ISCHR(file_dev->sb.st_mode)) {
1087                         int type;
1088
1089                         if (ioctl(fd, FIODTYPE, &type) == -1)
1090                                 err(1, "FIODTYPE ioctl failed on %s",
1091                                     dev->device_name);
1092                         else {
1093                                 if (type & D_TAPE)
1094                                         file_dev->file_type = CAMDD_FILE_TAPE;
1095                                 else if (type & D_DISK)
1096                                         file_dev->file_type = CAMDD_FILE_DISK;
1097                                 else if (type & D_MEM)
1098                                         file_dev->file_type = CAMDD_FILE_MEM;
1099                                 else if (type & D_TTY)
1100                                         file_dev->file_type = CAMDD_FILE_TTY;
1101                         }
1102                 } else if (S_ISDIR(file_dev->sb.st_mode)) {
1103                         errx(1, "cannot operate on directory %s",
1104                             dev->device_name);
1105                 } else if (S_ISFIFO(file_dev->sb.st_mode)) {
1106                         file_dev->file_type = CAMDD_FILE_PIPE;
1107                 } else
1108                         errx(1, "Cannot determine file type for %s",
1109                             dev->device_name);
1110
1111                 switch (file_dev->file_type) {
1112                 case CAMDD_FILE_REG:
1113                         if (file_dev->sb.st_size != 0)
1114                                 dev->max_sector = file_dev->sb.st_size - 1;
1115                         else
1116                                 dev->max_sector = 0;
1117                         file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
1118                         break;
1119                 case CAMDD_FILE_TAPE: {
1120                         uint64_t max_iosize, max_blk, min_blk, blk_gran;
1121                         /*
1122                          * Check block limits and maximum effective iosize.
1123                          * Make sure the blocksize is within the block
1124                          * limits (and a multiple of the minimum blocksize)
1125                          * and that the blocksize is <= maximum effective
1126                          * iosize.
1127                          */
1128                         retval = camdd_probe_tape(fd, dev->device_name,
1129                             &max_iosize, &max_blk, &min_blk, &blk_gran);
1130                         if (retval != 0)
1131                                 errx(1, "Unable to probe tape %s",
1132                                     dev->device_name);
1133
1134                         /*
1135                          * The blocksize needs to be <= the maximum
1136                          * effective I/O size of the tape device.  Note
1137                          * that this also takes into account the maximum
1138                          * blocksize reported by READ BLOCK LIMITS.
1139                          */
1140                         if (dev->blocksize > max_iosize) {
1141                                 warnx("Blocksize %u too big for %s, limiting "
1142                                     "to %ju", dev->blocksize, dev->device_name,
1143                                     max_iosize);
1144                                 dev->blocksize = max_iosize;
1145                         }
1146
1147                         /*
1148                          * The blocksize needs to be at least min_blk;
1149                          * The blocksize needs to be at least min_blk.
1150                         if (dev->blocksize < min_blk) {
1151                                 warnx("Blocksize %u too small for %s, "
1152                                     "increasing to %ju", dev->blocksize,
1153                                     dev->device_name, min_blk);
1154                                 dev->blocksize = min_blk;
1155                         }
1156
1157                         /*
1158                          * And the blocksize needs to be a multiple of
1159                          * the block granularity.
1160                          */
1161                         if ((blk_gran != 0)
1162                          && (dev->blocksize % (1 << blk_gran))) {
1163                                 warnx("Blocksize %u for %s not a multiple of "
1164                                     "%d, adjusting to %d", dev->blocksize,
1165                                     dev->device_name, (1 << blk_gran),
1166                                     dev->blocksize & ~((1 << blk_gran) - 1));
1167                                 dev->blocksize &= ~((1 << blk_gran) - 1);
1168                         }
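                        /*
                         * Worked example (added for clarity): if the
                         * drive reports blk_gran = 2, the granularity is
                         * 1 << 2 = 4 bytes, and a requested blocksize of
                         * 1026 would be masked down to 1024 here.
                         */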
1169
1170                         if (dev->blocksize == 0) {
1171                                 errx(1, "Unable to derive valid blocksize for "
1172                                     "%s", dev->device_name);
1173                         }
1174
1175                         /*
1176                          * For tape drives, set the sector size to the
1177                          * blocksize so that we make sure not to write
1178                          * less than the blocksize out to the drive.
1179                          */
1180                         dev->sector_size = dev->blocksize;
1181                         break;
1182                 }
1183                 case CAMDD_FILE_DISK: {
1184                         off_t media_size;
1185                         unsigned int sector_size;
1186
1187                         file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
1188
1189                         if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) {
1190                                 err(1, "DIOCGSECTORSIZE ioctl failed on %s",
1191                                     dev->device_name);
1192                         }
1193
1194                         if (sector_size == 0) {
1195                                 errx(1, "DIOCGSECTORSIZE ioctl returned "
1196                                     "invalid sector size %u for %s",
1197                                     sector_size, dev->device_name);
1198                         }
1199
1200                         if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) {
1201                                 err(1, "DIOCGMEDIASIZE ioctl failed on %s",
1202                                     dev->device_name);
1203                         }
1204
1205                         if (media_size == 0) {
1206                                 errx(1, "DIOCGMEDIASIZE ioctl returned "
1207                                     "invalid media size %ju for %s",
1208                                     (uintmax_t)media_size, dev->device_name);
1209                         }
1210
1211                         if (dev->blocksize % sector_size) {
1212                                 errx(1, "%s blocksize %u not a multiple of "
1213                                     "sector size %u", dev->device_name,
1214                                     dev->blocksize, sector_size);
1215                         }
1216
1217                         dev->sector_size = sector_size;
1218                         dev->max_sector = (media_size / sector_size) - 1;
1219                         break;
1220                 }
1221                 case CAMDD_FILE_MEM:
1222                         file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
1223                         break;
1224                 default:
1225                         break;
1226                 }
1227         }
1228
1229         if ((io_opts->offset != 0)
1230          && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) {
1231                 warnx("Offset %ju specified for %s, but we cannot seek on %s",
1232                     io_opts->offset, io_opts->dev_name, io_opts->dev_name);
1233                 goto bailout_error;
1234         }
1235 #if 0
1236         else if ((io_opts->offset != 0)
1237                 && ((io_opts->offset % dev->sector_size) != 0)) {
1238                 warnx("Offset %ju for %s is not a multiple of the "
1239                       "sector size %u", io_opts->offset, 
1240                       io_opts->dev_name, dev->sector_size);
1241                 goto bailout_error;
1242         } else {
1243                 dev->start_offset_bytes = io_opts->offset;
1244         }
1245 #endif
1246
1247 bailout:
1248         return (dev);
1249
1250 bailout_error:
1251         camdd_free_dev(dev);
1252         return (NULL);
1253 }
1254
1255 /*
1256  * Need to implement this.  Do a basic probe:
1257  * - Check the inquiry data, make sure we're talking to a device that we
1258  *   can reasonably expect to talk to -- direct, RBC, CD, WORM.
1259  * - Send a test unit ready, make sure the device is available.
1260  * - Get the capacity and block size.
1261  */
1262 struct camdd_dev *
1263 camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts,
1264                  camdd_argmask arglist, int probe_retry_count,
1265                  int probe_timeout, int io_retry_count, int io_timeout)
1266 {
1267         union ccb *ccb;
1268         uint64_t maxsector;
1269         uint32_t cpi_maxio, max_iosize, pass_numblocks;
1270         uint32_t block_len;
1271         struct scsi_read_capacity_data rcap;
1272         struct scsi_read_capacity_data_long rcaplong;
1273         struct camdd_dev *dev;
1274         struct camdd_dev_pass *pass_dev;
1275         struct kevent ke;
1276         int scsi_dev_type;
1277
1278         dev = NULL;
1279
1280         scsi_dev_type = SID_TYPE(&cam_dev->inq_data);
1281         maxsector = 0;
1282         block_len = 0;
1283
1284         /*
1285          * For devices that support READ CAPACITY, we'll attempt to get the
1286          * capacity.  Otherwise, we really don't support tape or other
1287          * devices via SCSI passthrough, so just return an error in that case.
1288          */
1289         switch (scsi_dev_type) {
1290         case T_DIRECT:
1291         case T_WORM:
1292         case T_CDROM:
1293         case T_OPTICAL:
1294         case T_RBC:
1295                 break;
1296         default:
1297                 errx(1, "Unsupported SCSI device type %d", scsi_dev_type);
1298                 break; /*NOTREACHED*/
1299         }
1300
1301         ccb = cam_getccb(cam_dev);
1302
1303         if (ccb == NULL) {
1304                 warnx("%s: error allocating ccb", __func__);
1305                 goto bailout;
1306         }
1307
1308         CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
1309
1310         scsi_read_capacity(&ccb->csio,
1311                            /*retries*/ probe_retry_count,
1312                            /*cbfcnp*/ NULL,
1313                            /*tag_action*/ MSG_SIMPLE_Q_TAG,
1314                            &rcap,
1315                            SSD_FULL_SIZE,
1316                            /*timeout*/ probe_timeout ? probe_timeout : 5000);
1317
1318         /* Disable freezing the device queue */
1319         ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
1320
1321         if (arglist & CAMDD_ARG_ERR_RECOVER)
1322                 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
1323
1324         if (cam_send_ccb(cam_dev, ccb) < 0) {
1325                 warn("error sending READ CAPACITY command");
1326
1327                 cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
1328                                 CAM_EPF_ALL, stderr);
1329
1330                 goto bailout;
1331         }
1332
1333         if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1334                 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr);
1335                 goto bailout;
1336         }
1337
1338         maxsector = scsi_4btoul(rcap.addr);
1339         block_len = scsi_4btoul(rcap.length);
1340
1341         /*
1342          * A last block of 2^32-1 means that the true capacity is over 2TB,
1343          * and we need to issue the long READ CAPACITY to get the real
1344          * capacity.  Otherwise, we're all set.
1345          */
1346         if (maxsector != 0xffffffff)
1347                 goto rcap_done;
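        /*
         * Added note: with 512-byte blocks, 2^32 sectors is 2 TiB, so a
         * reported last LBA of 0xffffffff means the capacity cannot be
         * represented in the standard READ CAPACITY response and the
         * READ CAPACITY (16) command below must be used instead.
         */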
1348
1349         scsi_read_capacity_16(&ccb->csio,
1350                               /*retries*/ probe_retry_count,
1351                               /*cbfcnp*/ NULL,
1352                               /*tag_action*/ MSG_SIMPLE_Q_TAG,
1353                               /*lba*/ 0,
1354                               /*reladdr*/ 0,
1355                               /*pmi*/ 0,
1356                               (uint8_t *)&rcaplong,
1357                               sizeof(rcaplong),
1358                               /*sense_len*/ SSD_FULL_SIZE,
1359                               /*timeout*/ probe_timeout ? probe_timeout : 5000);
1360
1361         /* Disable freezing the device queue */
1362         ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
1363
1364         if (arglist & CAMDD_ARG_ERR_RECOVER)
1365                 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
1366
1367         if (cam_send_ccb(cam_dev, ccb) < 0) {
1368                 warn("error sending READ CAPACITY (16) command");
1369                 cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
1370                                 CAM_EPF_ALL, stderr);
1371                 goto bailout;
1372         }
1373
1374         if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1375                 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr);
1376                 goto bailout;
1377         }
1378
1379         maxsector = scsi_8btou64(rcaplong.addr);
1380         block_len = scsi_4btoul(rcaplong.length);
1381
1382 rcap_done:
1383         if (block_len == 0) {
1384                 warnx("Sector size for %s%u is 0, cannot continue",
1385                     cam_dev->device_name, cam_dev->dev_unit_num);
1386                 goto bailout_error;
1387         }
1388
1389         CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi);
1390
1391         ccb->ccb_h.func_code = XPT_PATH_INQ;
1392         ccb->ccb_h.flags = CAM_DIR_NONE;
1393         ccb->ccb_h.retry_count = 1;
1394         
1395         if (cam_send_ccb(cam_dev, ccb) < 0) {
1396                 warn("error sending XPT_PATH_INQ CCB");
1397
1398                 cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
1399                                 CAM_EPF_ALL, stderr);
1400                 goto bailout;
1401         }
1402
1403         EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);
1404
1405         dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count,
1406                               io_timeout);
1407         if (dev == NULL)
1408                 goto bailout;
1409
1410         pass_dev = &dev->dev_spec.pass;
1411         pass_dev->scsi_dev_type = scsi_dev_type;
1412         pass_dev->dev = cam_dev;
1413         pass_dev->max_sector = maxsector;
1414         pass_dev->block_len = block_len;
1415         pass_dev->cpi_maxio = ccb->cpi.maxio;
1416         snprintf(dev->device_name, sizeof(dev->device_name), "%s%u",
1417                  pass_dev->dev->device_name, pass_dev->dev->dev_unit_num);
1418         dev->sector_size = block_len;
1419         dev->max_sector = maxsector;
1420         
1421
1422         /*
1423          * Determine the optimal blocksize to use for this device.
1424          */
1425
1426         /*
1427          * If the controller has not specified a maximum I/O size,
1428          * just go with 128K as a somewhat conservative value.
1429          */
1430         if (pass_dev->cpi_maxio == 0)
1431                 cpi_maxio = 131072;
1432         else
1433                 cpi_maxio = pass_dev->cpi_maxio;
1434
1435         /*
1436          * If the controller has a large maximum I/O size, limit it
1437          * to something smaller so that the kernel doesn't have trouble
1438          * allocating buffers to copy data in and out for us.
1439          * XXX KDM this is until we have unmapped I/O support in the kernel.
1440          */
1441         max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK);
1442
1443         /*
1444          * If we weren't able to get a block size for some reason,
1445          * default to 512 bytes.
1446          */
1447         block_len = pass_dev->block_len;
1448         if (block_len == 0)
1449                 block_len = 512;
1450
1451         /*
1452          * Figure out how many blocksize chunks will fit in the
1453          * maximum I/O size.
1454          */
1455         pass_numblocks = max_iosize / block_len;
1456
1457         /*
1458          * And finally, multiply the number of blocks by the LBA
1459          * length to get our maximum block size.
1460          */
1461         dev->blocksize = pass_numblocks * block_len;
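        /*
         * Worked example (added for clarity): with no controller-reported
         * maxio, cpi_maxio defaults to 131072 bytes; for a 512-byte block
         * length that yields pass_numblocks = 131072 / 512 = 256 and a
         * default blocksize of 256 * 512 = 131072 bytes.  A user-supplied
         * blocksize can still override this below.
         */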
1462
1463         if (io_opts->blocksize != 0) {
1464                 if ((io_opts->blocksize % dev->sector_size) != 0) {
1465                         warnx("Blocksize %ju for %s is not a multiple of "
1466                               "sector size %u", (uintmax_t)io_opts->blocksize, 
1467                               dev->device_name, dev->sector_size);
1468                         goto bailout_error;
1469                 }
1470                 dev->blocksize = io_opts->blocksize;
1471         }
1472         dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH;
1473         if (io_opts->queue_depth != 0)
1474                 dev->target_queue_depth = io_opts->queue_depth;
1475
1476         if (io_opts->offset != 0) {
1477                 if (io_opts->offset > (dev->max_sector * dev->sector_size)) {
1478                         warnx("Offset %ju is past the end of device %s",
1479                             (uintmax_t)io_opts->offset, dev->device_name);
1480                         goto bailout_error;
1481                 }
1482 #if 0
1483                 else if ((io_opts->offset % dev->sector_size) != 0) {
1484                         warnx("Offset %ju for %s is not a multiple of the "
1485                               "sector size %u", io_opts->offset, 
1486                               dev->device_name, dev->sector_size);
1487                         goto bailout_error;
1488                 }
1489                 dev->start_offset_bytes = io_opts->offset;
1490 #endif
1491         }
1492
1493         dev->min_cmd_size = io_opts->min_cmd_size;
1494
1495         dev->run = camdd_pass_run;
1496         dev->fetch = camdd_pass_fetch;
1497
1498 bailout:
1499         cam_freeccb(ccb);
1500
1501         return (dev);
1502
1503 bailout_error:
1504         cam_freeccb(ccb);
1505
1506         camdd_free_dev(dev);
1507
1508         return (NULL);
1509 }
1510
1511 void *
1512 camdd_worker(void *arg)
1513 {
1514         struct camdd_dev *dev = arg;
1515         struct camdd_buf *buf;
1516         struct timespec ts, *kq_ts;
1517
1518         ts.tv_sec = 0;
1519         ts.tv_nsec = 0;
1520
1521         pthread_mutex_lock(&dev->mutex);
1522
1523         dev->flags |= CAMDD_DEV_FLAG_ACTIVE;
1524
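        /*
         * Editor's note: a high-level summary of the loop below.  On each
         * pass the worker (1) queues more reads via camdd_queue() if this
         * is the reader side and the combined queue depth/byte count with
         * the peer is low, (2) issues everything it can from the run queue
         * through dev->run(), (3) checks whether EOF (ours or the peer's)
         * means we can exit, and (4) sleeps in kevent() until the pass(4)
         * descriptor, a signal, or the peer thread (EVFILT_USER) wakes us.
         */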
1525         for (;;) {
1526                 struct kevent ke;
1527                 int retval = 0;
1528
1529                 /*
1530                  * XXX KDM check the reorder queue depth?
1531                  */
1532                 if (dev->write_dev == 0) {
1533                         uint32_t our_depth, peer_depth, peer_bytes, our_bytes;
1534                         uint32_t target_depth = dev->target_queue_depth;
1535                         uint32_t peer_target_depth =
1536                             dev->peer_dev->target_queue_depth;
1537                         uint32_t peer_blocksize = dev->peer_dev->blocksize;
1538
1539                         camdd_get_depth(dev, &our_depth, &peer_depth,
1540                                         &our_bytes, &peer_bytes);
1541
1542 #if 0
1543                         while (((our_depth < target_depth)
1544                              && (peer_depth < peer_target_depth))
1545                             || ((peer_bytes + our_bytes) <
1546                                  (peer_blocksize * 2))) {
1547 #endif
1548                         while (((our_depth + peer_depth) <
1549                                 (target_depth + peer_target_depth))
1550                             || ((peer_bytes + our_bytes) <
1551                                 (peer_blocksize * 3))) {
1552
1553                                 retval = camdd_queue(dev, NULL);
1554                                 if (retval == 1)
1555                                         break;
1556                                 else if (retval != 0) {
1557                                         error_exit = 1;
1558                                         goto bailout;
1559                                 }
1560
1561                                 camdd_get_depth(dev, &our_depth, &peer_depth,
1562                                                 &our_bytes, &peer_bytes);
1563                         }
1564                 }
1565                 /*
1566                  * See if we have any I/O that is ready to execute.
1567                  */
1568                 buf = STAILQ_FIRST(&dev->run_queue);
1569                 if (buf != NULL) {
1570                         while (dev->target_queue_depth > dev->cur_active_io) {
1571                                 retval = dev->run(dev);
1572                                 if (retval == -1) {
1573                                         dev->flags |= CAMDD_DEV_FLAG_EOF;
1574                                         error_exit = 1;
1575                                         break;
1576                                 } else if (retval != 0) {
1577                                         break;
1578                                 }
1579                         }
1580                 }
1581
1582                 /*
1583                  * We've reached EOF, or our partner has reached EOF.
1584                  */
1585                 if ((dev->flags & CAMDD_DEV_FLAG_EOF)
1586                  || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) {
1587                         if (dev->write_dev != 0) {
1588                                 if ((STAILQ_EMPTY(&dev->work_queue))
1589                                  && (dev->num_run_queue == 0)
1590                                  && (dev->cur_active_io == 0)) {
1591                                         goto bailout;
1592                                 }
1593                         } else {
1594                                 /*
1595                                  * If we're the reader, and the writer
1596                                  * got EOF, it is already done.  If we got
1597                                  * the EOF, then we need to wait until
1598                                  * everything has been flushed out to the writer.
1599                                  */
1600                                 if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) {
1601                                         goto bailout;
1602                                 } else if ((dev->num_peer_work_queue == 0)
1603                                         && (dev->num_peer_done_queue == 0)
1604                                         && (dev->cur_active_io == 0)
1605                                         && (dev->num_run_queue == 0)) {
1606                                         goto bailout;
1607                                 }
1608                         }
1609                         /*
1610                          * XXX KDM need to do something about the pending
1611                          * queue and cleanup resources.
1612                          */
1613                 } 
1614
1615                 if ((dev->write_dev == 0)
1616                  && (dev->cur_active_io == 0)
1617                  && (dev->peer_bytes_queued < dev->peer_dev->blocksize))
1618                         kq_ts = &ts;
1619                 else
1620                         kq_ts = NULL;
1621
1622                 /*
1623                  * Run kevent to see if there are events to process.
1624                  */
1625                 pthread_mutex_unlock(&dev->mutex);
1626                 retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts);
1627                 pthread_mutex_lock(&dev->mutex);
1628                 if (retval == -1) {
1629                         warn("%s: error returned from kevent", __func__);
1630                         goto bailout;
1631                 } else if (retval != 0) {
1632                         switch (ke.filter) {
1633                         case EVFILT_READ:
1634                                 if (dev->fetch != NULL) {
1635                                         retval = dev->fetch(dev);
1636                                         if (retval == -1) {
1637                                                 error_exit = 1;
1638                                                 goto bailout;
1639                                         }
1640                                 }
1641                                 break;
1642                         case EVFILT_SIGNAL:
1643                                 /*
1644                                  * We register for this so we don't get
1645                                  * an error as a result of a SIGINFO or a
1646                                  * SIGINT.  It will actually get handled
1647                                  * by the signal handler.  If we get a
1648                                  * SIGINT, bail out without printing an
1649                                  * error message.  Any other signals 
1650                                  * will result in the error message above.
1651                                  */
1652                                 if (ke.ident == SIGINT)
1653                                         goto bailout;
1654                                 break;
1655                         case EVFILT_USER:
1656                                 retval = 0;
1657                                 /*
1658                                  * Check to see if the other thread has
1659                                  * queued any I/O for us to do.  (In this
1660                                  * case we're the writer.)
1661                                  */
1662                                 for (buf = STAILQ_FIRST(&dev->work_queue);
1663                                      buf != NULL;
1664                                      buf = STAILQ_FIRST(&dev->work_queue)) {
1665                                         STAILQ_REMOVE_HEAD(&dev->work_queue,
1666                                                            work_links);
1667                                         retval = camdd_queue(dev, buf);
1668                                         /*
1669                                          * We keep going unless we get an
1670                                          * actual error.  If we get EOF, we
1671                                          * still want to remove the buffers
1672                                          * from the queue and send them back
1673                                          * to the reader thread.
1674                                          */
1675                                         if (retval == -1) {
1676                                                 error_exit = 1;
1677                                                 goto bailout;
1678                                         } else
1679                                                 retval = 0;
1680                                 }
1681
1682                                 /*
1683                                  * Next check to see if the other thread has
1684                                  * queued any completed buffers back to us.
1685                                  * (In this case we're the reader.)
1686                                  */
1687                                 for (buf = STAILQ_FIRST(&dev->peer_done_queue);
1688                                      buf != NULL;
1689                                      buf = STAILQ_FIRST(&dev->peer_done_queue)){
1690                                         STAILQ_REMOVE_HEAD(
1691                                             &dev->peer_done_queue, work_links);
1692                                         dev->num_peer_done_queue--;
1693                                         camdd_peer_done(buf);
1694                                 }
1695                                 break;
1696                         default:
1697                                 warnx("%s: unknown kevent filter %d",
1698                                       __func__, ke.filter);
1699                                 break;
1700                         }
1701                 }
1702         }
1703
1704 bailout:
1705
1706         dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE;
1707
1708         /* XXX KDM cleanup resources here? */
1709
1710         pthread_mutex_unlock(&dev->mutex);
1711
1712         need_exit = 1;
1713         sem_post(&camdd_sem);
1714
1715         return (NULL);
1716 }
1717
1718 /*
1719  * Simplistic translation of CCB status to our local status.
1720  */
1721 camdd_buf_status
1722 camdd_ccb_status(union ccb *ccb)
1723 {
1724         camdd_buf_status status = CAMDD_STATUS_NONE;
1725         cam_status ccb_status;
1726
1727         ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK;
1728
1729         switch (ccb_status) {
1730         case CAM_REQ_CMP: {
1731                 if (ccb->csio.resid == 0) {
1732                         status = CAMDD_STATUS_OK;
1733                 } else if (ccb->csio.dxfer_len > ccb->csio.resid) {
1734                         status = CAMDD_STATUS_SHORT_IO;
1735                 } else {
1736                         status = CAMDD_STATUS_EOF;
1737                 }
1738                 break;
1739         }
1740         case CAM_SCSI_STATUS_ERROR: {
1741                 switch (ccb->csio.scsi_status) {
1742                 case SCSI_STATUS_OK:
1743                 case SCSI_STATUS_COND_MET:
1744                 case SCSI_STATUS_INTERMED:
1745                 case SCSI_STATUS_INTERMED_COND_MET:
1746                         status = CAMDD_STATUS_OK;
1747                         break;
1748                 case SCSI_STATUS_CMD_TERMINATED:
1749                 case SCSI_STATUS_CHECK_COND:
1750                 case SCSI_STATUS_QUEUE_FULL:
1751                 case SCSI_STATUS_BUSY:
1752                 case SCSI_STATUS_RESERV_CONFLICT:
1753                 default:
1754                         status = CAMDD_STATUS_ERROR;
1755                         break;
1756                 }
1757                 break;
1758         }
1759         default:
1760                 status = CAMDD_STATUS_ERROR;
1761                 break;
1762         }
1763
1764         return (status);
1765 }
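/*
 * Editor's note: for a CAM_REQ_CMP CCB, the mapping above works out as
 * follows (using an illustrative dxfer_len of 65536):
 *
 *   resid == 0     -> CAMDD_STATUS_OK        (full transfer)
 *   resid == 4096  -> CAMDD_STATUS_SHORT_IO  (partial transfer)
 *   resid == 65536 -> CAMDD_STATUS_EOF       (nothing transferred)
 */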
1766
1767 /*
1768  * Queue a buffer to our peer's work thread for writing.
1769  *
1770  * Returns 0 for success, -1 for failure, 1 if the other thread exited.
1771  */
1772 int
1773 camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf)
1774 {
1775         struct kevent ke;
1776         STAILQ_HEAD(, camdd_buf) local_queue;
1777         struct camdd_buf *buf1, *buf2;
1778         struct camdd_buf_data *data = NULL;
1779         uint64_t peer_bytes_queued = 0;
1780         int active = 1;
1781         int retval = 0;
1782
1783         STAILQ_INIT(&local_queue);
1784
1785         /*
1786          * Since we're the reader, we need to queue our I/O to the writer
1787          * in sequential order so that it gets written out in
1788          * sequential order.
1789          *
1790          * Check the next expected I/O starting offset.  If this doesn't
1791          * match, put it on the reorder queue.
1792          */
1793         if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) {
1794
1795                 /*
1796                  * If there is nothing on the queue, there is no sorting
1797                  * needed.
1798                  */
1799                 if (STAILQ_EMPTY(&dev->reorder_queue)) {
1800                         STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links);
1801                         dev->num_reorder_queue++;
1802                         goto bailout;
1803                 }
1804
1805                 /*
1806                  * Sort in ascending order by starting LBA.  There should
1807                  * be no identical LBAs.
1808                  */
1809                 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL;
1810                      buf1 = buf2) {
1811                         buf2 = STAILQ_NEXT(buf1, links);
1812                         if (buf->lba < buf1->lba) {
1813                                 /*
1814                                  * If we're less than the first one, then
1815                                  * we insert at the head of the list
1816                                  * because this has to be the first element
1817                                  * on the list.
1818                                  */
1819                                 STAILQ_INSERT_HEAD(&dev->reorder_queue,
1820                                                    buf, links);
1821                                 dev->num_reorder_queue++;
1822                                 break;
1823                         } else if (buf->lba > buf1->lba) {
1824                                 if (buf2 == NULL) {
1825                                         STAILQ_INSERT_TAIL(&dev->reorder_queue, 
1826                                             buf, links);
1827                                         dev->num_reorder_queue++;
1828                                         break;
1829                                 } else if (buf->lba < buf2->lba) {
1830                                         STAILQ_INSERT_AFTER(&dev->reorder_queue,
1831                                             buf1, buf, links);
1832                                         dev->num_reorder_queue++;
1833                                         break;
1834                                 }
1835                         } else {
1836                                 errx(1, "Found buffers with duplicate LBA %ju!",
1837                                      (uintmax_t)buf->lba);
1838                         }
1839                 }
1840                 goto bailout;
1841         } else {
1842
1843                 /*
1844                  * We're the next expected I/O completion, so put ourselves
1845                  * on the local queue to be sent to the writer.  We use
1846                  * work_links here so that we can queue this to the 
1847                  * peer_work_queue before taking the buffer off of the
1848                  * local_queue.
1849                  */
1850                 dev->next_completion_pos_bytes += buf->len;
1851                 STAILQ_INSERT_TAIL(&local_queue, buf, work_links);
1852
1853                 /*
1854                  * Go through the reorder queue looking for more sequential
1855                  * I/O and add it to the local queue.
1856                  */
1857                 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL;
1858                      buf1 = STAILQ_FIRST(&dev->reorder_queue)) {
1859                         /*
1860                          * As soon as we see an I/O that is out of sequence,
1861                          * we're done.
1862                          */
1863                         if ((buf1->lba * dev->sector_size) !=
1864                              dev->next_completion_pos_bytes)
1865                                 break;
1866
1867                         STAILQ_REMOVE_HEAD(&dev->reorder_queue, links);
1868                         dev->num_reorder_queue--;
1869                         STAILQ_INSERT_TAIL(&local_queue, buf1, work_links);
1870                         dev->next_completion_pos_bytes += buf1->len;
1871                 }
1872         }
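        /*
         * Editor's note: an illustrative sequence for the reordering
         * above, assuming 512-byte sectors, 65536-byte buffers, and
         * next_completion_pos_bytes starting at 0.  If the I/O for LBA 128
         * completes first, 128 * 512 = 65536 != 0, so it is parked on the
         * reorder queue.  When the I/O for LBA 0 completes, it matches,
         * goes on local_queue, and next_completion_pos_bytes becomes
         * 65536; the loop above then drains the LBA 128 buffer from the
         * reorder queue as well, so both are handed to the writer in
         * ascending order.
         */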
1873
1874         /*
1875          * Setup the event to let the other thread know that it has work
1876          * pending.
1877          */
1878         EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0,
1879                NOTE_TRIGGER, 0, NULL);
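        /*
         * Editor's note: a minimal sketch of the EVFILT_USER handshake
         * this relies on, assuming the peer registered the same ident on
         * its kqueue when the devices were set up (the exact registration
         * flags are an assumption, not taken from this file):
         *
         *   struct kevent reg;
         *   EV_SET(&reg, (uintptr_t)&dev->work_queue, EVFILT_USER,
         *          EV_ADD | EV_CLEAR, 0, 0, NULL);
         *   kevent(dev->kq, &reg, 1, NULL, 0, NULL);
         *
         * The NOTE_TRIGGER event built above is then delivered with a
         * second kevent() call further down, once the peer's mutex is
         * held.
         */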
1880
1881         /*
1882          * Put this on our shadow queue so that we know what we've queued
1883          * to the other thread.
1884          */
1885         STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) {
1886                 if (buf1->buf_type != CAMDD_BUF_DATA) {
1887                         errx(1, "%s: should have a data buffer, not an "
1888                             "indirect buffer", __func__);
1889                 }
1890                 data = &buf1->buf_type_spec.data;
1891
1892                 /*
1893                  * We only need to send one EOF to the writer, and don't
1894                  * need to continue sending EOFs after that.
1895                  */
1896                 if (buf1->status == CAMDD_STATUS_EOF) {
1897                         if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) {
1898                                 STAILQ_REMOVE(&local_queue, buf1, camdd_buf,
1899                                     work_links);
1900                                 camdd_release_buf(buf1);
1901                                 retval = 1;
1902                                 continue;
1903                         }
1904                         dev->flags |= CAMDD_DEV_FLAG_EOF_SENT;
1905                 }
1906
1907
1908                 STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links);
1909                 peer_bytes_queued += (data->fill_len - data->resid);
1910                 dev->peer_bytes_queued += (data->fill_len - data->resid);
1911                 dev->num_peer_work_queue++;
1912         }
1913
1914         if (STAILQ_FIRST(&local_queue) == NULL)
1915                 goto bailout;
1916
1917         /*
1918          * Drop our mutex and pick up the other thread's mutex.  We need to
1919          * do this to avoid deadlocks.
1920          */
1921         pthread_mutex_unlock(&dev->mutex);
1922         pthread_mutex_lock(&dev->peer_dev->mutex);
1923
1924         if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) {
1925                 /*
1926                  * Put the buffers on the other thread's incoming work queue.
1927                  */
1928                 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL;
1929                      buf1 = STAILQ_FIRST(&local_queue)) {
1930                         STAILQ_REMOVE_HEAD(&local_queue, work_links);
1931                         STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1,
1932                                            work_links);
1933                 }
1934                 /*
1935                  * Send an event to the other thread's kqueue to let it know
1936                  * that there is something on the work queue.
1937                  */
1938                 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL);
1939                 if (retval == -1)
1940                         warn("%s: unable to add peer work_queue kevent",
1941                              __func__);
1942                 else
1943                         retval = 0;
1944         } else
1945                 active = 0;
1946
1947         pthread_mutex_unlock(&dev->peer_dev->mutex);
1948         pthread_mutex_lock(&dev->mutex);
1949
1950         /*
1951          * If the other side isn't active, run through the queue and
1952          * release all of the buffers.
1953          */
1954         if (active == 0) {
1955                 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL;
1956                      buf1 = STAILQ_FIRST(&local_queue)) {
1957                         STAILQ_REMOVE_HEAD(&local_queue, work_links);
1958                         STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf,
1959                                       links);
1960                         dev->num_peer_work_queue--;
1961                         camdd_release_buf(buf1);
1962                 }
1963                 dev->peer_bytes_queued -= peer_bytes_queued;
1964                 retval = 1;
1965         }
1966
1967 bailout:
1968         return (retval);
1969 }
1970
1971 /*
1972  * Return a buffer to the reader thread when we have completed writing it.
1973  */
1974 int
1975 camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf)
1976 {
1977         struct kevent ke;
1978         int retval = 0;
1979
1980         /*
1981          * Setup the event to let the other thread know that we have
1982          * completed a buffer.
1983          */
1984         EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0,
1985                NOTE_TRIGGER, 0, NULL);
1986
1987         /*
1988          * Drop our lock and acquire the other thread's lock before
1989          * manipulating its peer done queue.
1990          */
1991         pthread_mutex_unlock(&dev->mutex);
1992         pthread_mutex_lock(&dev->peer_dev->mutex);
1993
1994         /*
1995          * Put the buffer on the reader thread's peer done queue now that
1996          * we have completed it.
1997          */
1998         STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf,
1999                            work_links);
2000         dev->peer_dev->num_peer_done_queue++;
2001
2002         /*
2003          * Send an event to the peer thread to let it know that we've added
2004          * something to its peer done queue.
2005          */
2006         retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL);
2007         if (retval == -1)
2008                 warn("%s: unable to add peer_done_queue kevent", __func__);
2009         else
2010                 retval = 0;
2011
2012         /*
2013          * Drop the other thread's lock and reacquire ours.
2014          */
2015         pthread_mutex_unlock(&dev->peer_dev->mutex);
2016         pthread_mutex_lock(&dev->mutex);
2017
2018         return (retval);
2019 }
2020
2021 /*
2022  * Free a buffer that was written out by the writer thread and returned to
2023  * the reader thread.
2024  */
2025 void
2026 camdd_peer_done(struct camdd_buf *buf)
2027 {
2028         struct camdd_dev *dev;
2029         struct camdd_buf_data *data;
2030
2031         dev = buf->dev;
2032         if (buf->buf_type != CAMDD_BUF_DATA) {
2033                 errx(1, "%s: should have a data buffer, not an "
2034                     "indirect buffer", __func__);
2035         }
2036
2037         data = &buf->buf_type_spec.data;
2038
2039         STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links);
2040         dev->num_peer_work_queue--;
2041         dev->peer_bytes_queued -= (data->fill_len - data->resid);
2042
2043         if (buf->status == CAMDD_STATUS_EOF)
2044                 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF;
2045
2046         STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
2047 }
2048
2049 /*
2050  * Assumes caller holds the lock for this device.
2051  */
2052 void
2053 camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf,
2054                    int *error_count)
2055 {
2056         int retval = 0;
2057
2058         /*
2059          * If we're the reader, we need to send the completed I/O
2060          * to the writer.  If we're the writer, we need to just
2061          * free up resources, or let the reader know if we've
2062          * encountered an error.
2063          */
2064         if (dev->write_dev == 0) {
2065                 retval = camdd_queue_peer_buf(dev, buf);
2066                 if (retval != 0)
2067                         (*error_count)++;
2068         } else {
2069                 struct camdd_buf *tmp_buf, *next_buf;
2070
2071                 STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links,
2072                                     next_buf) {
2073                         struct camdd_buf *src_buf;
2074                         struct camdd_buf_indirect *indirect;
2075
2076                         STAILQ_REMOVE(&buf->src_list, tmp_buf,
2077                                       camdd_buf, src_links);
2078
2079                         tmp_buf->status = buf->status;
2080
2081                         if (tmp_buf->buf_type == CAMDD_BUF_DATA) {
2082                                 camdd_complete_peer_buf(dev, tmp_buf);
2083                                 continue;
2084                         }
2085
2086                         indirect = &tmp_buf->buf_type_spec.indirect;
2087                         src_buf = indirect->src_buf;
2088                         src_buf->refcount--;
2089                         /*
2090                          * XXX KDM we probably need to account for
2091                          * exactly how many bytes we were able to
2092                          * write.  Allocate the residual to the
2093                          * first N buffers?  Or just track the
2094                          * number of bytes written?  Right now the reader
2095                          * doesn't do anything with a residual.
2096                          */
2097                         src_buf->status = buf->status;
2098                         if (src_buf->refcount <= 0)
2099                                 camdd_complete_peer_buf(dev, src_buf);
2100                         STAILQ_INSERT_TAIL(&dev->free_indirect_queue,
2101                                            tmp_buf, links);
2102                 }
2103
2104                 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
2105         }
2106 }
2107
2108 /*
2109  * Fetch all completed commands from the pass(4) device.
2110  *
2111  * Returns the number of commands received, or -1 if any of the commands
2112  * completed with an error.  Returns 0 if no commands are available.
2113  */
2114 int
2115 camdd_pass_fetch(struct camdd_dev *dev)
2116 {
2117         struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
2118         union ccb ccb;
2119         int retval = 0, num_fetched = 0, error_count = 0;
2120
2121         pthread_mutex_unlock(&dev->mutex);
2122         /*
2123          * XXX KDM we don't distinguish between EFAULT and ENOENT.
2124          */
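        /*
         * Editor's note: this loop relies on CAMIOGET failing (returning
         * -1) once no more completed CCBs are waiting; per the XXX above,
         * ENOENT (nothing ready) is not distinguished from EFAULT (a
         * copyout problem), either one simply ends the loop.
         */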
2125         while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) {
2126                 struct camdd_buf *buf;
2127                 struct camdd_buf_data *data;
2128                 cam_status ccb_status;
2129                 union ccb *buf_ccb;
2130
2131                 buf = ccb.ccb_h.ccb_buf;
2132                 data = &buf->buf_type_spec.data;
2133                 buf_ccb = &data->ccb;
2134
2135                 num_fetched++;
2136
2137                 /*
2138                  * Copy the CCB back out so we get status, sense data, etc.
2139                  */
2140                 bcopy(&ccb, buf_ccb, sizeof(ccb));
2141
2142                 pthread_mutex_lock(&dev->mutex);
2143
2144                 /*
2145                  * We're now done, so take this off the active queue.
2146                  */
2147                 STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links);
2148                 dev->cur_active_io--;
2149
2150                 ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK;
2151                 if (ccb_status != CAM_REQ_CMP) {
2152                         cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL,
2153                                         CAM_EPF_ALL, stderr);
2154                 }
2155
2156                 data->resid = ccb.csio.resid;
2157                 dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid);
2158
2159                 if (buf->status == CAMDD_STATUS_NONE)
2160                         buf->status = camdd_ccb_status(&ccb);
2161                 if (buf->status == CAMDD_STATUS_ERROR)
2162                         error_count++;
2163                 else if (buf->status == CAMDD_STATUS_EOF) {
2164                         /*
2165                          * Once we queue this buffer to our partner thread,
2166                          * he will know that we've hit EOF.
2167                          */
2168                         dev->flags |= CAMDD_DEV_FLAG_EOF;
2169                 }
2170
2171                 camdd_complete_buf(dev, buf, &error_count);
2172
2173                 /*
2174                  * Unlock in preparation for the ioctl call.
2175                  */
2176                 pthread_mutex_unlock(&dev->mutex);
2177         }
2178
2179         pthread_mutex_lock(&dev->mutex);
2180
2181         if (error_count > 0)
2182                 return (-1);
2183         else
2184                 return (num_fetched);
2185 }
2186
2187 /*
2188  * Returns -1 for error, 0 for success/continue, and 1 for resource
2189  * shortage/stop processing.
2190  */
2191 int
2192 camdd_file_run(struct camdd_dev *dev)
2193 {
2194         struct camdd_dev_file *file_dev = &dev->dev_spec.file;
2195         struct camdd_buf_data *data;
2196         struct camdd_buf *buf;
2197         off_t io_offset;
2198         int retval = 0, write_dev = dev->write_dev;
2199         int error_count = 0, no_resources = 0, double_buf_needed = 0;
2200         uint32_t num_sectors = 0, db_len = 0;
2201
2202         buf = STAILQ_FIRST(&dev->run_queue);
2203         if (buf == NULL) {
2204                 no_resources = 1;
2205                 goto bailout;
2206         } else if ((dev->write_dev == 0)
2207                 && (dev->flags & (CAMDD_DEV_FLAG_EOF |
2208                                   CAMDD_DEV_FLAG_EOF_SENT))) {
2209                 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
2210                 dev->num_run_queue--;
2211                 buf->status = CAMDD_STATUS_EOF;
2212                 error_count++;
2213                 goto bailout;
2214         }
2215
2216         /*
2217          * If we're writing, we need to go through the source buffer list
2218          * and create an S/G list.
2219          */
2220         if (write_dev != 0) {
2221                 retval = camdd_buf_sg_create(buf, /*iovec*/ 1,
2222                     dev->sector_size, &num_sectors, &double_buf_needed);
2223                 if (retval != 0) {
2224                         no_resources = 1;
2225                         goto bailout;
2226                 }
2227         }
2228
2229         STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
2230         dev->num_run_queue--;
2231
2232         data = &buf->buf_type_spec.data;
2233
2234         /*
2235          * pread(2) and pwrite(2) offsets are byte offsets.
2236          */
2237         io_offset = buf->lba * dev->sector_size;
2238
2239         /*
2240          * Unlock the mutex while we read or write.
2241          */
2242         pthread_mutex_unlock(&dev->mutex);
2243
2244         /*
2245          * Note that we don't need to double buffer if we're the reader
2246          * because in that case, we have allocated a single buffer of
2247          * sufficient size to do the read.  This copy is necessary on
2248          * writes because if one of the components of the S/G list is not
2249          * a sector size multiple, the kernel will reject the write.  This
2250          * is unfortunate but not surprising.  So this will make sure that
2251          * we're using a single buffer that is a multiple of the sector size.
2252          */
2253         if ((double_buf_needed != 0)
2254          && (data->sg_count > 1)
2255          && (write_dev != 0)) {
2256                 uint32_t cur_offset;
2257                 int i;
2258
2259                 if (file_dev->tmp_buf == NULL)
2260                         file_dev->tmp_buf = calloc(dev->blocksize, 1);
2261                 if (file_dev->tmp_buf == NULL) {
2262                         buf->status = CAMDD_STATUS_ERROR;
2263                         error_count++;
2264                         goto bailout;
2265                 }
2266                 for (i = 0, cur_offset = 0; i < data->sg_count; i++) {
2267                         bcopy(data->iovec[i].iov_base,
2268                             &file_dev->tmp_buf[cur_offset],
2269                             data->iovec[i].iov_len);
2270                         cur_offset += data->iovec[i].iov_len;
2271                 }
2272                 db_len = cur_offset;
2273         }
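        /*
         * Editor's note: an illustrative case for the copy above, assuming
         * a 512-byte sector device and an S/G list whose elements are
         * 10000 + 10000 + 12768 = 32768 bytes.  The total is a sector
         * multiple but the individual elements are not, so they are
         * coalesced into tmp_buf and written with a single pwrite()/write()
         * below (db_len ends up as 32768 in this example).
         */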
2274
2275         if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) {
2276                 if (write_dev == 0) {
2277                         /*
2278                          * XXX KDM is there any way we would need a S/G
2279                          * list here?
2280                          */
2281                         retval = pread(file_dev->fd, data->buf,
2282                             buf->len, io_offset);
2283                 } else {
2284                         if (double_buf_needed != 0) {
2285                                 retval = pwrite(file_dev->fd, file_dev->tmp_buf,
2286                                     db_len, io_offset);
2287                         } else if (data->sg_count == 0) {
2288                                 retval = pwrite(file_dev->fd, data->buf,
2289                                     data->fill_len, io_offset);
2290                         } else {
2291                                 retval = pwritev(file_dev->fd, data->iovec,
2292                                     data->sg_count, io_offset);
2293                         }
2294                 }
2295         } else {
2296                 if (write_dev == 0) {
2297                         /*
2298                          * XXX KDM is there any way we would need a S/G
2299                          * list here?
2300                          */
2301                         retval = read(file_dev->fd, data->buf, buf->len);
2302                 } else {
2303                         if (double_buf_needed != 0) {
2304                                 retval = write(file_dev->fd, file_dev->tmp_buf,
2305                                     db_len);
2306                         } else if (data->sg_count == 0) {
2307                                 retval = write(file_dev->fd, data->buf,
2308                                     data->fill_len);
2309                         } else {
2310                                 retval = writev(file_dev->fd, data->iovec,
2311                                     data->sg_count);
2312                         }
2313                 }
2314         }
2315
2316         /* We're done, re-acquire the lock */
2317         pthread_mutex_lock(&dev->mutex);
2318
2319         if (retval >= (ssize_t)data->fill_len) {
2320                 /*
2321                  * If more bytes were transferred than requested, that
2322                  * indicates an overrun, which should only happen at
2323                  * the end of a transfer if we have to round up to a sector
2324                  * boundary.
2325                  */
2326                 if (buf->status == CAMDD_STATUS_NONE)
2327                         buf->status = CAMDD_STATUS_OK;
2328                 data->resid = 0;
2329                 dev->bytes_transferred += retval;
2330         } else if (retval == -1) {
2331                 warn("Error %s %s", (write_dev) ? "writing to" :
2332                     "reading from", file_dev->filename);
2333
2334                 buf->status = CAMDD_STATUS_ERROR;
2335                 data->resid = data->fill_len;
2336                 error_count++;
2337
2338                 if (dev->debug == 0)
2339                         goto bailout;
2340
2341                 if ((double_buf_needed != 0)
2342                  && (write_dev != 0)) {
2343                         fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju "
2344                             "offset %ju\n", __func__, file_dev->fd,
2345                             file_dev->tmp_buf, db_len, (uintmax_t)buf->lba,
2346                             (uintmax_t)io_offset);
2347                 } else if (data->sg_count == 0) {
2348                         fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju "
2349                             "offset %ju\n", __func__, file_dev->fd, data->buf,
2350                             data->fill_len, (uintmax_t)buf->lba,
2351                             (uintmax_t)io_offset);
2352                 } else {
2353                         int i;
2354
2355                         fprintf(stderr, "%s: fd %d, len %u, lba %ju "
2356                             "offset %ju\n", __func__, file_dev->fd, 
2357                             data->fill_len, (uintmax_t)buf->lba,
2358                             (uintmax_t)io_offset);
2359
2360                         for (i = 0; i < data->sg_count; i++) {
2361                                 fprintf(stderr, "index %d ptr %p len %zu\n",
2362                                     i, data->iovec[i].iov_base,
2363                                     data->iovec[i].iov_len);
2364                         }
2365                 }
2366         } else if (retval == 0) {
2367                 buf->status = CAMDD_STATUS_EOF;
2368                 if (dev->debug != 0)
2369                         printf("%s: got EOF from %s!\n", __func__,
2370                             file_dev->filename);
2371                 data->resid = data->fill_len;
2372                 error_count++;
2373         } else if (retval < (ssize_t)data->fill_len) {
2374                 if (buf->status == CAMDD_STATUS_NONE)
2375                         buf->status = CAMDD_STATUS_SHORT_IO;
2376                 data->resid = data->fill_len - retval;
2377                 dev->bytes_transferred += retval;
2378         }
2379
2380 bailout:
2381         if (buf != NULL) {
2382                 if (buf->status == CAMDD_STATUS_EOF) {
2383                         struct camdd_buf *buf2;
2384                         dev->flags |= CAMDD_DEV_FLAG_EOF;
2385                         STAILQ_FOREACH(buf2, &dev->run_queue, links)
2386                                 buf2->status = CAMDD_STATUS_EOF;
2387                 }
2388
2389                 camdd_complete_buf(dev, buf, &error_count);
2390         }
2391
2392         if (error_count != 0)
2393                 return (-1);
2394         else if (no_resources != 0)
2395                 return (1);
2396         else
2397                 return (0);
2398 }
2399
2400 /*
2401  * Execute one command from the run queue.  Returns 0 for success, 1 for
2402  * stop processing, and -1 for error.
2403  */
2404 int
2405 camdd_pass_run(struct camdd_dev *dev)
2406 {
2407         struct camdd_buf *buf = NULL;
2408         struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
2409         struct camdd_buf_data *data;
2410         uint32_t num_blocks, sectors_used = 0;
2411         union ccb *ccb;
2412         int retval = 0, is_write = dev->write_dev;
2413         int double_buf_needed = 0;
2414
2415         buf = STAILQ_FIRST(&dev->run_queue);
2416         if (buf == NULL) {
2417                 retval = 1;
2418                 goto bailout;
2419         }
2420
2421         /*
2422          * If we're writing, we need to go through the source buffer list
2423          * and create an S/G list.
2424          */
2425         if (is_write != 0) {
2426                 retval = camdd_buf_sg_create(buf, /*iovec*/ 0, dev->sector_size,
2427                     &sectors_used, &double_buf_needed);
2428                 if (retval != 0) {
2429                         retval = -1;
2430                         goto bailout;
2431                 }
2432         }
2433
2434         STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
2435         dev->num_run_queue--;
2436
2437         data = &buf->buf_type_spec.data;
2438
2439         ccb = &data->ccb;
2440         CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio);
2441
2442         /*
2443          * In almost every case the number of blocks should be the device
2444          * block size.  The exception may be at the end of an I/O stream
2445          * for a partial block or at the end of a device.
2446          */
2447         if (is_write != 0)
2448                 num_blocks = sectors_used;
2449         else
2450                 num_blocks = data->fill_len / pass_dev->block_len;
2451
2452         scsi_read_write(&ccb->csio,
2453                         /*retries*/ dev->retry_count,
2454                         /*cbfcnp*/ NULL,
2455                         /*tag_action*/ MSG_SIMPLE_Q_TAG,
2456                         /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ :
2457                                    SCSI_RW_WRITE,
2458                         /*byte2*/ 0,
2459                         /*minimum_cmd_size*/ dev->min_cmd_size,
2460                         /*lba*/ buf->lba,
2461                         /*block_count*/ num_blocks,
2462                         /*data_ptr*/ (data->sg_count != 0) ?
2463                                      (uint8_t *)data->segs : data->buf,
2464                         /*dxfer_len*/ (num_blocks * pass_dev->block_len),
2465                         /*sense_len*/ SSD_FULL_SIZE,
2466                         /*timeout*/ dev->io_timeout);
2467
2468         /* Disable freezing the device queue */
2469         ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
2470
2471         if (dev->retry_count != 0)
2472                 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
2473
2474         if (data->sg_count != 0) {
2475                 ccb->csio.sglist_cnt = data->sg_count;
2476                 ccb->ccb_h.flags |= CAM_DATA_SG;
2477         }
2478
2479         /*
2480          * Store a pointer to the buffer in the CCB.  The kernel will
2481          * restore this when we get it back, and we'll use it to identify
2482          * the buffer this CCB came from.
2483          */
2484         ccb->ccb_h.ccb_buf = buf;
2485
2486         /*
2487          * Unlock our mutex in preparation for issuing the ioctl.
2488          */
2489         pthread_mutex_unlock(&dev->mutex);
2490         /*
2491          * Queue the CCB to the pass(4) driver.
2492          */
2493         if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) {
2494                 pthread_mutex_lock(&dev->mutex);
2495
2496                 warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__,
2497                      pass_dev->dev->device_name, pass_dev->dev->dev_unit_num);
2498                 warn("%s: CCB address is %p", __func__, ccb);
2499                 retval = -1;
2500
2501                 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links);
2502         } else {
2503                 pthread_mutex_lock(&dev->mutex);
2504
2505                 dev->cur_active_io++;
2506                 STAILQ_INSERT_TAIL(&dev->active_queue, buf, links);
2507         }
2508
2509 bailout:
2510         return (retval);
2511 }
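/*
 * Editor's note: a rough sketch of the asynchronous pass(4) flow that
 * camdd_pass_run() and camdd_pass_fetch() implement together, using only
 * names that appear in this file:
 *
 *   camdd_pass_run():    build CCB, ioctl(fd, CAMIOQUEUE, ccb)   (submit)
 *   kevent() wakeup:     EVFILT_READ fires on the pass(4) fd
 *   camdd_pass_fetch():  ioctl(fd, CAMIOGET, &ccb) in a loop     (reap)
 *
 * The ccb_buf pointer stashed in the CCB header by camdd_pass_run() is how
 * each reaped CCB is matched back to its camdd_buf.
 */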
2512
2513 int
2514 camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len)
2515 {
2516         struct camdd_dev_pass *pass_dev;
2517         uint32_t num_blocks;
2518         int retval = 0;
2519
2520         pass_dev = &dev->dev_spec.pass;
2521
2522         *lba = dev->next_io_pos_bytes / dev->sector_size;
2523         *len = dev->blocksize;
2524         num_blocks = *len / dev->sector_size;
2525
2526         /*
2527          * If max_sector is 0, then we have no set limit.  This can happen
2528          * if we're writing to a file in a filesystem, or reading from
2529          * something like /dev/zero.
2530          */
2531         if ((dev->max_sector != 0)
2532          || (dev->sector_io_limit != 0)) {
2533                 uint64_t max_sector;
2534
2535                 if ((dev->max_sector != 0)
2536                  && (dev->sector_io_limit != 0)) 
2537                         max_sector = min(dev->sector_io_limit, dev->max_sector);
2538                 else if (dev->max_sector != 0)
2539                         max_sector = dev->max_sector;
2540                 else
2541                         max_sector = dev->sector_io_limit;
2542
2543
2544                 /*
2545                  * Check to see whether we're starting off past the end of
2546                  * the device.  If so, we need to just send an EOF      
2547                  * notification to the writer.
2548                  */
2549                 if (*lba > max_sector) {
2550                         *len = 0;
2551                         retval = 1;
2552                 } else if (((*lba + num_blocks) > max_sector + 1)
2553                         || ((*lba + num_blocks) < *lba)) {
2554                         /*
2555                          * If we get here (but pass the first check), we
2556                          * can trim the request length down to go to the
2557                          * end of the device.
2558                          */
2559                         num_blocks = (max_sector + 1) - *lba;
2560                         *len = num_blocks * dev->sector_size;
2561                         retval = 1;
2562                 }
2563         }
2564
2565         dev->next_io_pos_bytes += *len;
2566
2567         return (retval);
2568 }
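/*
 * Editor's note: an illustrative run of camdd_get_next_lba_len(), with
 * assumed values of a 512-byte sector size, a 65536-byte blocksize (128
 * sectors), and max_sector = 1000.  When next_io_pos_bytes reaches 491520,
 * *lba = 960 and 960 + 128 = 1088 > 1001, so num_blocks is trimmed to
 * (1000 + 1) - 960 = 41, *len becomes 41 * 512 = 20992, and 1 is returned
 * to signal that EOF follows.
 */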
2569
2570 /*
2571  * Returns 0 for success, 1 for EOF detected, and -1 for failure.
2572  */
2573 int
2574 camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf)
2575 {
2576         struct camdd_buf *buf = NULL;
2577         struct camdd_buf_data *data;
2578         struct camdd_dev_pass *pass_dev;
2579         size_t new_len;
2580         struct camdd_buf_data *rb_data;
2581         int is_write = dev->write_dev;
2582         int eof_flush_needed = 0;
2583         int retval = 0;
2584         int error;
2585
2586         pass_dev = &dev->dev_spec.pass;
2587
2588         /*
2589          * If we've gotten EOF or our partner has, we should not continue
2590          * queueing I/O.  If we're a writer, though, we should continue
2591          * to write any buffers that don't have EOF status.
2592          */
2593         if ((dev->flags & CAMDD_DEV_FLAG_EOF)
2594          || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF)
2595           && (is_write == 0))) {
2596                 /*
2597                  * Tell the worker thread that we have seen EOF.
2598                  */
2599                 retval = 1;
2600
2601                 /*
2602                  * If we're the writer, send the buffer back with EOF status.
2603                  */
2604                 if (is_write) {
2605                         read_buf->status = CAMDD_STATUS_EOF;
2606                         
2607                         error = camdd_complete_peer_buf(dev, read_buf);
2608                 }
2609                 goto bailout;
2610         }
2611
2612         if (is_write == 0) {
2613                 buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
2614                 if (buf == NULL) {
2615                         retval = -1;
2616                         goto bailout;
2617                 }
2618                 data = &buf->buf_type_spec.data;
2619
2620                 retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len);
2621                 if (retval != 0) {
2622                         buf->status = CAMDD_STATUS_EOF;
2623
2624                         if ((buf->len == 0)
2625                          && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT |
2626                              CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) {
2627                                 camdd_release_buf(buf);
2628                                 goto bailout;
2629                         }
2630                         dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED;
2631                 }
2632
2633                 data->fill_len = buf->len;
2634                 data->src_start_offset = buf->lba * dev->sector_size;
2635
2636                 /*
2637                  * Put this on the run queue.
2638                  */
2639                 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
2640                 dev->num_run_queue++;
2641
2642                 /* We're done. */
2643                 goto bailout;
2644         }
2645
2646         /*
2647          * Check for new EOF status from the reader.
2648          */
2649         if ((read_buf->status == CAMDD_STATUS_EOF)
2650          || (read_buf->status == CAMDD_STATUS_ERROR)) {
2651                 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF;
2652                 if ((STAILQ_FIRST(&dev->pending_queue) == NULL)
2653                  && (read_buf->len == 0)) {
2654                         camdd_complete_peer_buf(dev, read_buf);
2655                         retval = 1;
2656                         goto bailout;
2657                 } else
2658                         eof_flush_needed = 1;
2659         }
2660
2661         /*
2662          * See if we have a buffer we're composing with pieces from our
2663          * partner thread.
2664          */
2665         buf = STAILQ_FIRST(&dev->pending_queue);
2666         if (buf == NULL) {
2667                 uint64_t lba;
2668                 ssize_t len;
2669
2670                 retval = camdd_get_next_lba_len(dev, &lba, &len);
2671                 if (retval != 0) {
2672                         read_buf->status = CAMDD_STATUS_EOF;
2673
2674                         if (len == 0) {
2675                                 dev->flags |= CAMDD_DEV_FLAG_EOF;
2676                                 error = camdd_complete_peer_buf(dev, read_buf);
2677                                 goto bailout;
2678                         }
2679                 }
2680
2681                 /*
2682                  * If we don't have a pending buffer, we need to grab a new
2683                  * one from the free list or allocate another one.
2684                  */
2685                 buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
2686                 if (buf == NULL) {
2687                         retval = 1;
2688                         goto bailout;
2689                 }
2690
2691                 buf->lba = lba;
2692                 buf->len = len;
2693
2694                 STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links);
2695                 dev->num_pending_queue++;
2696         }
2697
2698         data = &buf->buf_type_spec.data;
2699
2700         rb_data = &read_buf->buf_type_spec.data;
2701
2702         if ((rb_data->src_start_offset != dev->next_peer_pos_bytes)
2703          && (dev->debug != 0)) {
2704                 printf("%s: WARNING: reader offset %#jx != expected offset "
2705                     "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset,
2706                     (uintmax_t)dev->next_peer_pos_bytes);
2707         }
2708         dev->next_peer_pos_bytes = rb_data->src_start_offset +
2709             (rb_data->fill_len - rb_data->resid);
2710
2711         new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len;
2712         if (new_len < buf->len) {
2713                 /*
2714                  * There are three cases here:
2715                  * 1. We need more data to fill up a block, so we put 
2716                  *    this I/O on the queue and wait for more I/O.
2717                  * 2. We have a pending buffer in the queue that is
2718                  *    smaller than our blocksize, but we got an EOF.  So we
2719                  *    need to go ahead and flush the write out.
2720                  * 3. We got an error.
2721                  */
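                /*
                 * Editor's note: an illustrative instance of case (1),
                 * assuming buf->len (the writer's blocksize) is 65536 and
                 * the reader hands us 16384-byte buffers with no residual.
                 * The first three reads leave data->fill_len at 16384,
                 * 32768 and 49152 and take the early "goto bailout" below;
                 * the fourth makes new_len equal to buf->len and is handled
                 * by the "else if" branch further down, which moves the
                 * composed buffer onto the run queue.
                 */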
2722
2723                 /*
2724                  * Increment our fill length.
2725                  */
2726                 data->fill_len += (rb_data->fill_len - rb_data->resid);
2727
2728                 /*
2729                  * Add the new read buffer to the list for writing.
2730                  */
2731                 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links);
2732
2733                 /* Increment the count */
2734                 buf->src_count++;
2735
2736                 if (eof_flush_needed == 0) {
2737                         /*
2738                          * We need to exit, because we don't have enough
2739                          * data yet.
2740                          */
2741                         goto bailout;
2742                 } else {
2743                         /*
2744                          * Take the buffer off of the pending queue.
2745                          */
2746                         STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf,
2747                                       links);
2748                         dev->num_pending_queue--;
2749
2750                         /*
2751                          * If we need an EOF flush, but there is no data
2752                          * to flush, go ahead and return this buffer.
2753                          */
2754                         if (data->fill_len == 0) {
2755                                 camdd_complete_buf(dev, buf, /*error_count*/0);
2756                                 retval = 1;
2757                                 goto bailout;
2758                         }
2759
2760                         /*
2761                          * Put this on the next queue for execution.
2762                          */
2763                         STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
2764                         dev->num_run_queue++;
2765                 }
2766         } else if (new_len == buf->len) {
2767                 /*
2768                  * We have enough data to completely fill one block,
2769                  * so we're ready to issue the I/O.
2770                  */
2771
2772                 /*
2773                  * Take the buffer off of the pending queue.
2774                  */
2775                 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links);
2776                 dev->num_pending_queue--;
2777
2778                 /*
2779                  * Add the new read buffer to the list for writing.
2780                  */
2781                 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links);
2782
2783                 /* Increment the count */
2784                 buf->src_count++;
2785
2786                 /*
2787                  * Increment our fill length.
2788                  */
2789                 data->fill_len += (rb_data->fill_len - rb_data->resid);
2790
2791                 /*
2792                  * Put this on the next queue for execution.
2793                  */
2794                 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
2795                 dev->num_run_queue++;
2796         } else {
2797                 struct camdd_buf *idb;
2798                 struct camdd_buf_indirect *indirect;
2799                 uint32_t len_to_go, cur_offset;
2800
2801
2802                 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT);
2803                 if (idb == NULL) {
2804                         retval = 1;
2805                         goto bailout;
2806                 }
2807                 indirect = &idb->buf_type_spec.indirect;
2808                 indirect->src_buf = read_buf;
2809                 read_buf->refcount++;
2810                 indirect->offset = 0;
2811                 indirect->start_ptr = rb_data->buf;
2812                 /*
2813                  * We've already established that there is more
2814                  * data in read_buf than we have room for in our
2815                  * current write request.  So this particular chunk
2816                  * of the request should just be the remainder
2817                  * needed to fill up a block.
2818                  */
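                     /*
                      * Illustrative example (values assumed, not taken from
                      * the source): with buf->len == 65536, data->fill_len ==
                      * 40960 and data->resid == 0, the assignment below sets
                      * indirect->len = 65536 - 40960 = 24576 bytes, exactly
                      * the remainder needed to fill the write block.
                      */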
2819                 indirect->len = buf->len - (data->fill_len - data->resid);
2820
2821                 camdd_buf_add_child(buf, idb);
2822
2823                 /*
2824                  * This buffer is ready to execute, so we can take
2825                  * it off the pending queue and put it on the run
2826                  * queue.
2827                  */
2828                 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf,
2829                               links);
2830                 dev->num_pending_queue--;
2831                 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links);
2832                 dev->num_run_queue++;
2833
2834                 cur_offset = indirect->offset + indirect->len;
2835
2836                 /*
2837                  * The resulting I/O would be too large to fit in
2838                  * one block.  We need to split this I/O into
2839                  * multiple pieces.  Allocate as many buffers as needed.
2840                  */
2841                 for (len_to_go = rb_data->fill_len - rb_data->resid -
2842                      indirect->len; len_to_go > 0;) {
2843                         struct camdd_buf *new_buf;
2844                         struct camdd_buf_data *new_data;
2845                         uint64_t lba;
2846                         ssize_t len;
2847
2848                         retval = camdd_get_next_lba_len(dev, &lba, &len);
2849                         if ((retval != 0)
2850                          && (len == 0)) {
2851                                 /*
2852                                  * The device has already been marked
2853                                  * as EOF, and there is no space left.
2854                                  */
2855                                 goto bailout;
2856                         }
2857
2858                         new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA);
2859                         if (new_buf == NULL) {
2860                                 retval = 1;
2861                                 goto bailout;
2862                         }
2863
2864                         new_buf->lba = lba;
2865                         new_buf->len = len;
2866
2867                         idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT);
2868                         if (idb == NULL) {
2869                                 retval = 1;
2870                                 goto bailout;
2871                         }
2872
2873                         indirect = &idb->buf_type_spec.indirect;
2874
2875                         indirect->src_buf = read_buf;
2876                         read_buf->refcount++;
2877                         indirect->offset = cur_offset;
2878                         indirect->start_ptr = rb_data->buf + cur_offset;
2879                         indirect->len = min(len_to_go, new_buf->len);
2880 #if 0
2881                         if (((indirect->len % dev->sector_size) != 0)
2882                          || ((indirect->offset % dev->sector_size) != 0)) {
2883                                 warnx("offset %ju len %ju not aligned with "
2884                                     "sector size %u", (uintmax_t)indirect->offset,
2885                                     (uintmax_t)indirect->len, dev->sector_size);
2886                         }
2887 #endif
2888                         cur_offset += indirect->len;
2889                         len_to_go -= indirect->len;
2890
2891                         camdd_buf_add_child(new_buf, idb);
2892
2893                         new_data = &new_buf->buf_type_spec.data;
2894
2895                         if ((new_data->fill_len == new_buf->len)
2896                          || (eof_flush_needed != 0)) {
2897                                 STAILQ_INSERT_TAIL(&dev->run_queue,
2898                                                    new_buf, links);
2899                                 dev->num_run_queue++;
2900                         } else if (new_data->fill_len < buf->len) {
2901                                 STAILQ_INSERT_TAIL(&dev->pending_queue,
2902                                                 new_buf, links);
2903                                 dev->num_pending_queue++;
2904                         } else {
2905                                 warnx("%s: too much data in new "
2906                                       "buffer!", __func__);
2907                                 retval = 1;
2908                                 goto bailout;
2909                         }
2910                 }
2911         }
2912
2913 bailout:
2914         return (retval);
2915 }
2916
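     /*
      * Report this device's current queue depth and byte counts, along with
      * the amount of work outstanding on the peer.  Worked example (numbers
      * assumed purely for illustration): with cur_active_io == 3,
      * num_run_queue == 2 and a 65536 byte blocksize, *our_depth is 5 and
      * *our_bytes is 5 * 65536 = 327680 bytes.
      */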
2917 void
2918 camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth,
2919                 uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes)
2920 {
2921         *our_depth = dev->cur_active_io + dev->num_run_queue;
2922         if (dev->num_peer_work_queue >
2923             dev->num_peer_done_queue)
2924                 *peer_depth = dev->num_peer_work_queue -
2925                               dev->num_peer_done_queue;
2926         else
2927                 *peer_depth = 0;
2928         *our_bytes = *our_depth * dev->blocksize;
2929         *peer_bytes = dev->peer_bytes_queued;
2930 }
2931
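     /*
      * Signal handler shared by SIGINFO and SIGINT.  SIGINFO only asks for a
      * status line; any other signal requests shutdown and marks the run as
      * an error.  Posting camdd_sem wakes the wait loop in camdd_rw() so it
      * can act on the flags.
      */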
2932 void
2933 camdd_sig_handler(int sig)
2934 {
2935         if (sig == SIGINFO)
2936                 need_status = 1;
2937         else {
2938                 need_exit = 1;
2939                 error_exit = 1;
2940         }
2941
2942         sem_post(&camdd_sem);
2943 }
2944
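     /*
      * Print per-device byte counts, elapsed time and throughput.  The MB/sec
      * value below is min(bytes transferred) / 2^20, scaled by 1e9 / total_ns.
      * Worked example (values assumed for illustration): 1073741824 bytes in
      * 8.0 seconds (8e9 ns) gives 1024 / 8 = 128.00 MB/sec.
      */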
2945 void
2946 camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, 
2947                    struct timespec *start_time)
2948 {
2949         struct timespec done_time;
2950         uint64_t total_ns;
2951         long double mb_sec, total_sec;
2952         int error = 0;
2953
2954         error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time);
2955         if (error != 0) {
2956                 warn("Unable to get done time");
2957                 return;
2958         }
2959
2960         timespecsub(&done_time, start_time);
2961
2962         total_ns = (uint64_t)done_time.tv_sec * 1000000000 + done_time.tv_nsec;
2963         total_sec = total_ns;
2964         total_sec /= 1000000000;
2965
2966         fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n"
2967                 "%.4Lf seconds elapsed\n",
2968                 (uintmax_t)camdd_dev->bytes_transferred,
2969                 (camdd_dev->write_dev == 0) ?  "read from" : "written to",
2970                 camdd_dev->device_name,
2971                 (uintmax_t)other_dev->bytes_transferred,
2972                 (other_dev->write_dev == 0) ? "read from" : "written to",
2973                 other_dev->device_name, total_sec);
2974
2975         mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred);
2976         mb_sec /= 1024 * 1024;
2977         mb_sec *= 1000000000;
2978         mb_sec /= total_ns;
2979         fprintf(stderr, "%.2Lf MB/sec\n", mb_sec);
2980 }
2981
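     /*
      * Top level of a copy: open and probe both endpoints (pass(4) device or
      * file), link them as peers, start one camdd_worker() thread per device,
      * then sleep on camdd_sem until the workers finish or a signal requests
      * status (SIGINFO) or termination.
      */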
2982 int
2983 camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io,
2984          int retry_count, int timeout)
2985 {
2986         char *device = NULL;
2987         struct cam_device *new_cam_dev = NULL;
2988         struct camdd_dev *devs[2];
2989         struct timespec start_time;
2990         pthread_t threads[2];
2991         int unit = 0;
2992         int error = 0;
2993         int i;
2994
2995         if (num_io_opts != 2) {
2996                 warnx("Must have one input and one output path");
2997                 error = 1;
2998                 goto bailout;
2999         }
3000
3001         bzero(devs, sizeof(devs));
3002
3003         for (i = 0; i < num_io_opts; i++) {
3004                 switch (io_opts[i].dev_type) {
3005                 case CAMDD_DEV_PASS: {
3006                         camdd_argmask new_arglist = CAMDD_ARG_NONE;
3007                         int bus = 0, target = 0, lun = 0;
3008                         char name[30];
3009                         int rv;
3010
3011                         if (isdigit(io_opts[i].dev_name[0])) {
3012                                 /* device specified as bus:target[:lun] */
3013                                 rv = parse_btl(io_opts[i].dev_name, &bus,
3014                                     &target, &lun, &new_arglist);
3015                                 if (rv < 2) {
3016                                         warnx("numeric device specification "
3017                                              "must be either bus:target, or "
3018                                              "bus:target:lun");
3019                                         error = 1;
3020                                         goto bailout;
3021                                 }
3022                                 /* default to 0 if lun was not specified */
3023                                 if ((new_arglist & CAMDD_ARG_LUN) == 0) {
3024                                         lun = 0;
3025                                         new_arglist |= CAMDD_ARG_LUN;
3026                                 }
3027                         } else {
3028                                 if (cam_get_device(io_opts[i].dev_name, name,
3029                                                    sizeof name, &unit) == -1) {
3030                                         warnx("%s", cam_errbuf);
3031                                         error = 1;
3032                                         goto bailout;
3033                                 }
3034                                 device = strdup(name);
3035                                 new_arglist |= CAMDD_ARG_DEVICE |CAMDD_ARG_UNIT;
3036                         }
3037
3038                         if (new_arglist & (CAMDD_ARG_BUS | CAMDD_ARG_TARGET))
3039                                 new_cam_dev = cam_open_btl(bus, target, lun,
3040                                     O_RDWR, NULL);
3041                         else
3042                                 new_cam_dev = cam_open_spec_device(device, unit,
3043                                     O_RDWR, NULL);
3044                         if (new_cam_dev == NULL) {
3045                                 warnx("%s", cam_errbuf);
3046                                 error = 1;
3047                                 goto bailout;
3048                         }
3049
3050                         devs[i] = camdd_probe_pass(new_cam_dev,
3051                             /*io_opts*/ &io_opts[i],
3052                             CAMDD_ARG_ERR_RECOVER, 
3053                             /*probe_retry_count*/ 3,
3054                             /*probe_timeout*/ 5000,
3055                             /*io_retry_count*/ retry_count,
3056                             /*io_timeout*/ timeout);
3057                         if (devs[i] == NULL) {
3058                                 warn("Unable to probe device %s%u",
3059                                      new_cam_dev->device_name,
3060                                      new_cam_dev->dev_unit_num);
3061                                 error = 1;
3062                                 goto bailout;
3063                         }
3064                         break;
3065                 }
3066                 case CAMDD_DEV_FILE: {
3067                         int fd = -1;
3068
3069                         if (io_opts[i].dev_name[0] == '-') {
3070                                 if (io_opts[i].write_dev != 0)
3071                                         fd = STDOUT_FILENO;
3072                                 else
3073                                         fd = STDIN_FILENO;
3074                         } else {
3075                                 if (io_opts[i].write_dev != 0) {
3076                                         fd = open(io_opts[i].dev_name,
3077                                             O_RDWR | O_CREAT, S_IWUSR |S_IRUSR);
3078                                 } else {
3079                                         fd = open(io_opts[i].dev_name,
3080                                             O_RDONLY);
3081                                 }
3082                         }
3083                         if (fd == -1) {
3084                                 warn("error opening file %s",
3085                                     io_opts[i].dev_name);
3086                                 error = 1;
3087                                 goto bailout;
3088                         }
3089
3090                         devs[i] = camdd_probe_file(fd, &io_opts[i],
3091                             retry_count, timeout);
3092                         if (devs[i] == NULL) {
3093                                 error = 1;
3094                                 goto bailout;
3095                         }
3096
3097                         break;
3098                 }
3099                 default:
3100                         warnx("Unknown device type %d (%s)",
3101                             io_opts[i].dev_type, io_opts[i].dev_name);
3102                         error = 1;
3103                         goto bailout;
3104                         break; /*NOTREACHED */
3105                 }
3106
3107                 devs[i]->write_dev = io_opts[i].write_dev;
3108
3109                 devs[i]->start_offset_bytes = io_opts[i].offset;
3110
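                     /*
                      * Illustrative example (values assumed): with a 1 MiB
                      * starting offset, a 512 byte sector size and a 10 MiB
                      * max_io, the limit computed below is 2048 + 20480 - 1 =
                      * 22527, the last sector number this device will be
                      * asked to transfer.
                      */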
3111                 if (max_io != 0) {
3112                         devs[i]->sector_io_limit =
3113                             (devs[i]->start_offset_bytes /
3114                             devs[i]->sector_size) +
3115                             (max_io / devs[i]->sector_size) - 1;
3120                 }
3121
3122                 devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes;
3123                 devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes;
3124         }
3125
3126         devs[0]->peer_dev = devs[1];
3127         devs[1]->peer_dev = devs[0];
3128         devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes;
3129         devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes;
3130
3131         sem_init(&camdd_sem, /*pshared*/ 0, 0);
3132
3133         signal(SIGINFO, camdd_sig_handler);
3134         signal(SIGINT, camdd_sig_handler);
3135
3136         error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time);
3137         if (error != 0) {
3138                 warn("Unable to get start time");
3139                 goto bailout;
3140         }
3141
3142         for (i = 0; i < num_io_opts; i++) {
3143                 error = pthread_create(&threads[i], NULL, camdd_worker,
3144                                        (void *)devs[i]);
3145                 if (error != 0) {
3146                         warnc(error, "pthread_create() failed");
3147                         goto bailout;
3148                 }
3149         }
3150
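             /*
              * Wait loop: sleep on camdd_sem until a worker thread or the
              * signal handler posts it.  On exit (or a sem_wait() error),
              * wake each worker by triggering its EVFILT_USER kevent and
              * setting CAMDD_DEV_FLAG_EOF; on SIGINFO, print a status line
              * and keep waiting.
              */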
3151         for (;;) {
3152                 if ((sem_wait(&camdd_sem) == -1)
3153                  || (need_exit != 0)) {
3154                         struct kevent ke;
3155
3156                         for (i = 0; i < num_io_opts; i++) {
3157                                 EV_SET(&ke, (uintptr_t)&devs[i]->work_queue,
3158                                     EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
3159
3160                                 devs[i]->flags |= CAMDD_DEV_FLAG_EOF;
3161
3162                                 error = kevent(devs[i]->kq, &ke, 1, NULL, 0,
3163                                                 NULL);
3164                                 if (error == -1)
3165                                         warn("%s: unable to wake up thread",
3166                                             __func__);
3167                                 error = 0;
3168                         }
3169                         break;
3170                 } else if (need_status != 0) {
3171                         camdd_print_status(devs[0], devs[1], &start_time);
3172                         need_status = 0;
3173                 }
3174         } 
3175         for (i = 0; i < num_io_opts; i++) {
3176                 pthread_join(threads[i], NULL);
3177         }
3178
3179         camdd_print_status(devs[0], devs[1], &start_time);
3180
3181 bailout:
3182
3183         for (i = 0; i < num_io_opts; i++)
3184                 camdd_free_dev(devs[i]);
3185
3186         return (error + error_exit);
3187 }
3188
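     /*
      * Print a usage summary.  As a purely illustrative invocation, copying a
      * pass(4) device to a file might look like:
      *
      *     camdd -i pass=pass0,bs=1M,depth=4 -o file=/tmp/backup.img
      */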
3189 void
3190 usage(void)
3191 {
3192         fprintf(stderr,
3193 "usage:  camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n"
3194 "              <-i|-o file=/tmp/file,bs=512K,offset=1M>\n"
3195 "              <-i|-o file=/dev/da0,bs=512K,offset=1M>\n"
3196 "              <-i|-o file=/dev/nsa0,bs=512K>\n"
3197 "              [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n"
3198 "Option description\n"
3199 "-i <arg=val>  Specify input device/file and parameters\n"
3200 "-o <arg=val>  Specify output device/file and parameters\n"
3201 "Input and Output parameters\n"
3202 "pass=name     Specify a pass(4) device like pass0 or /dev/pass0\n"
3203 "file=name     Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n"
3204 "              or - for stdin/stdout\n"
3205 "bs=blocksize  Specify blocksize in bytes, or using K, M, G, etc. suffix\n"
3206 "offset=len    Specify starting offset in bytes or using K, M, G suffix\n"
3207 "              NOTE: offset cannot be specified on tapes, pipes, stdin/out\n"
3208 "depth=N       Specify a numeric queue depth.  This only applies to pass(4)\n"
3209 "mcs=N         Specify a minimum cmd size for pass(4) read/write commands\n"
3210 "Optional arguments\n"
3211 "-C retry_cnt  Specify a retry count for pass(4) devices\n"
3212 "-E            Enable CAM error recovery for pass(4) devices\n"
3213 "-m max_io     Specify the maximum amount to be transferred in bytes or\n"
3214 "              using K, G, M, etc. suffixes\n"
3215 "-t timeout    Specify the I/O timeout to use with pass(4) devices\n"
3216 "-v            Enable verbose error recovery\n"
3217 "-h            Print this message\n");
3218 }
3219
3220
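     /*
      * Parse one -i/-o argument string of comma separated name=value pairs,
      * e.g. "pass=pass0,bs=1M,offset=1M,depth=4" (the form shown in usage()),
      * and fill in the matching fields of *io_opts.  Unrecognized names only
      * draw a warning; empty names or values are treated as errors.
      */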
3221 int
3222 camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts)
3223 {
3224         char *tmpstr, *tmpstr2;
3225         char *orig_tmpstr = NULL;
3226         int retval = 0;
3227
3228         io_opts->write_dev = is_write;
3229
3230         tmpstr = strdup(args);
3231         if (tmpstr == NULL) {
3232                 warn("strdup failed");
3233                 retval = 1;
3234                 goto bailout;
3235         }
3236         orig_tmpstr = tmpstr;
3237         while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) {
3238                 char *name, *value;
3239
3240                 /*
3241                  * If the user creates an empty parameter by putting in two
3242                  * commas, skip over it and look for the next field.
3243                  */
3244                 if (*tmpstr2 == '\0')
3245                         continue;
3246
3247                 name = strsep(&tmpstr2, "=");
3248                 if (*name == '\0') {
3249                         warnx("Got empty I/O parameter name");
3250                         retval = 1;
3251                         goto bailout;
3252                 }
3253                 value = strsep(&tmpstr2, "=");
3254                 if ((value == NULL)
3255                  || (*value == '\0')) {
3256                         warnx("Empty I/O parameter value for %s", name);
3257                         retval = 1;
3258                         goto bailout;
3259                 }
3260                 if (strncasecmp(name, "file", 4) == 0) {
3261                         io_opts->dev_type = CAMDD_DEV_FILE;
3262                         io_opts->dev_name = strdup(value);
3263                         if (io_opts->dev_name == NULL) {
3264                                 warn("Error allocating memory");
3265                                 retval = 1;
3266                                 goto bailout;
3267                         }
3268                 } else if (strncasecmp(name, "pass", 4) == 0) {
3269                         io_opts->dev_type = CAMDD_DEV_PASS;
3270                         io_opts->dev_name = strdup(value);
3271                         if (io_opts->dev_name == NULL) {
3272                                 warn("Error allocating memory");
3273                                 retval = 1;
3274                                 goto bailout;
3275                         }
3276                 } else if ((strncasecmp(name, "bs", 2) == 0)
3277                         || (strncasecmp(name, "blocksize", 9) == 0)) {
3278                         retval = expand_number(value, &io_opts->blocksize);
3279                         if (retval == -1) {
3280                                 warn("expand_number(3) failed on %s=%s", name,
3281                                     value);
3282                                 retval = 1;
3283                                 goto bailout;
3284                         }
3285                 } else if (strncasecmp(name, "depth", 5) == 0) {
3286                         char *endptr;
3287
3288                         io_opts->queue_depth = strtoull(value, &endptr, 0);
3289                         if (*endptr != '\0') {
3290                                 warnx("invalid queue depth %s", value);
3291                                 retval = 1;
3292                                 goto bailout;
3293                         }
3294                 } else if (strncasecmp(name, "mcs", 3) == 0) {
3295                         char *endptr;
3296
3297                         io_opts->min_cmd_size = strtol(value, &endptr, 0);
3298                         if ((*endptr != '\0')
3299                          || ((io_opts->min_cmd_size > 16)
3300                           || (io_opts->min_cmd_size < 0))) {
3301                                 warnx("invalid minimum cmd size %s", value);
3302                                 retval = 1;
3303                                 goto bailout;
3304                         }
3305                 } else if (strncasecmp(name, "offset", 6) == 0) {
3306                         retval = expand_number(value, &io_opts->offset);
3307                         if (retval == -1) {
3308                                 warn("expand_number(3) failed on %s=%s", name,
3309                                     value);
3310                                 retval = 1;
3311                                 goto bailout;
3312                         }
3313                 } else if (strncasecmp(name, "debug", 5) == 0) {
3314                         char *endptr;
3315
3316                         io_opts->debug = strtoull(value, &endptr, 0);
3317                         if (*endptr != '\0') {
3318                                 warnx("invalid debug level %s", value);
3319                                 retval = 1;
3320                                 goto bailout;
3321                         }
3322                 } else {
3323                         warnx("Unrecognized parameter %s=%s", name, value);
3324                 }
3325         }
3326 bailout:
3327         free(orig_tmpstr);
3328
3329         return (retval);
3330 }
3331
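     /*
      * Entry point: collect command line options, require exactly one -i and
      * one -o specification, fall back to CAMDD_PASS_RW_TIMEOUT when no
      * timeout was given, and hand the pair of camdd_io_opts to camdd_rw().
      */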
3332 int
3333 main(int argc, char **argv)
3334 {
3335         int c;
3336         camdd_argmask arglist = CAMDD_ARG_NONE;
3337         int timeout = 0, retry_count = 1;
3338         int error = 0;
3339         uint64_t max_io = 0;
3340         struct camdd_io_opts *opt_list = NULL;
3341
3342         if (argc == 1) {
3343                 usage();
3344                 exit(1);
3345         }
3346
3347         opt_list = calloc(2, sizeof(struct camdd_io_opts));
3348         if (opt_list == NULL) {
3349                 warn("Unable to allocate option list");
3350                 error = 1;
3351                 goto bailout;
3352         }
3353
3354         while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){
3355                 switch (c) {
3356                 case 'C':
3357                         retry_count = strtol(optarg, NULL, 0);
3358                         if (retry_count < 0)
3359                                 errx(1, "retry count %d is < 0",
3360                                      retry_count);
3361                         arglist |= CAMDD_ARG_RETRIES;
3362                         break;
3363                 case 'E':
3364                         arglist |= CAMDD_ARG_ERR_RECOVER;
3365                         break;
3366                 case 'i':
3367                 case 'o':
3368                         if (((c == 'i')
3369                           && (opt_list[0].dev_type != CAMDD_DEV_NONE))
3370                          || ((c == 'o')
3371                           && (opt_list[1].dev_type != CAMDD_DEV_NONE))) {
3372                                 errx(1, "Only one input and output path "
3373                                     "allowed");
3374                         }
3375                         error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0,
3376                             (c == 'o') ? &opt_list[1] : &opt_list[0]);
3377                         if (error != 0)
3378                                 goto bailout;
3379                         break;
3380                 case 'm':
3381                         error = expand_number(optarg, &max_io);
3382                         if (error == -1) {
3383                                 warn("invalid maximum I/O amount %s", optarg);
3384                                 error = 1;
3385                                 goto bailout;
3386                         }
3387                         break;
3388                 case 't':
3389                         timeout = strtol(optarg, NULL, 0);
3390                         if (timeout < 0)
3391                                 errx(1, "invalid timeout %d", timeout);
3392                         /* Convert the timeout from seconds to ms */
3393                         timeout *= 1000;
3394                         arglist |= CAMDD_ARG_TIMEOUT;
3395                         break;
3396                 case 'v':
3397                         arglist |= CAMDD_ARG_VERBOSE;
3398                         break;
3399                 case 'h':
3400                 default:
3401                         usage();
3402                         exit(1);
3403                         break; /*NOTREACHED*/
3404                 }
3405         }
3406
3407         if ((opt_list[0].dev_type == CAMDD_DEV_NONE)
3408          || (opt_list[1].dev_type == CAMDD_DEV_NONE))
3409                 errx(1, "Must specify both -i and -o");
3410
3411         /*
3412          * Set the timeout if the user hasn't specified one.
3413          */
3414         if (timeout == 0)
3415                 timeout = CAMDD_PASS_RW_TIMEOUT;
3416
3417         error = camdd_rw(opt_list, 2, max_io, retry_count, timeout);
3418
3419 bailout:
3420         free(opt_list);
3421
3422         exit(error);
3423 }