/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/types.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_pass.h>
typedef enum {
        PASS_FLAG_OPEN                  = 0x01,
        PASS_FLAG_LOCKED                = 0x02,
        PASS_FLAG_INVALID               = 0x04,
        PASS_FLAG_INITIAL_PHYSPATH      = 0x08,
        PASS_FLAG_ZONE_INPROG           = 0x10,
        PASS_FLAG_ZONE_VALID            = 0x20,
        PASS_FLAG_UNMAPPED_CAPABLE      = 0x40,
        PASS_FLAG_ABANDONED_REF_SET     = 0x80
} pass_flags;

typedef enum {
        PASS_STATE_NORMAL
} pass_state;

typedef enum {
        PASS_CCB_BUFFER_IO,
        PASS_CCB_QUEUED_IO
} pass_ccb_types;
#define ccb_type        ppriv_field0
#define ccb_ioreq       ppriv_ptr1

/*
 * The maximum number of memory segments we preallocate.
 */
#define PASS_MAX_SEGS   16
typedef enum {
        PASS_IO_USER_SEG_MALLOC = 0x01,
        PASS_IO_KERN_SEG_MALLOC = 0x02,
        PASS_IO_ABANDONED       = 0x04
} pass_io_flags;
struct pass_io_req {
        union ccb                       ccb;
        union ccb                       *alloced_ccb;
        union ccb                       *user_ccb_ptr;
        camq_entry                      user_periph_links;
        ccb_ppriv_area                  user_periph_priv;
        struct cam_periph_map_info      mapinfo;
        pass_io_flags                   flags;
        ccb_flags                       data_flags;
        int                             num_user_segs;
        bus_dma_segment_t               user_segs[PASS_MAX_SEGS];
        int                             num_kern_segs;
        bus_dma_segment_t               kern_segs[PASS_MAX_SEGS];
        bus_dma_segment_t               *user_segptr;
        bus_dma_segment_t               *kern_segptr;
        int                             num_bufs;
        uint32_t                        dirs[CAM_PERIPH_MAXMAPS];
        uint32_t                        lengths[CAM_PERIPH_MAXMAPS];
        uint8_t                         *user_bufs[CAM_PERIPH_MAXMAPS];
        uint8_t                         *kern_bufs[CAM_PERIPH_MAXMAPS];
        struct bintime                  start_time;
        TAILQ_ENTRY(pass_io_req)        links;
};
struct pass_softc {
        pass_state                state;
        pass_flags                flags;
        uint8_t                   pd_type;
        int                       open_count;
        u_int                     maxio;
        struct devstat            *device_stats;
        struct cdev               *dev;
        struct cdev               *alias_dev;
        struct task               add_physpath_task;
        struct task               shutdown_kqueue_task;
        struct selinfo            read_select;
        TAILQ_HEAD(, pass_io_req) incoming_queue;
        TAILQ_HEAD(, pass_io_req) active_queue;
        TAILQ_HEAD(, pass_io_req) abandoned_queue;
        TAILQ_HEAD(, pass_io_req) done_queue;
        struct cam_periph         *periph;
        char                      zone_name[12];
        char                      io_zone_name[12];
        uma_zone_t                pass_zone;
        uma_zone_t                pass_io_zone;
        size_t                    io_zone_size;
};
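
/*
 * Lifecycle of an asynchronous (CAMIOQUEUE) request: it starts on the
 * incoming_queue, moves to the active_queue when it is passed to
 * xpt_action(), and lands on the done_queue in passdone(), where CAMIOGET
 * picks it up.  Requests still active at final close or device departure
 * are parked on the abandoned_queue and freed when they complete.
 */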
static d_open_t         passopen;
static d_close_t        passclose;
static d_ioctl_t        passioctl;
static d_ioctl_t        passdoioctl;
static d_poll_t         passpoll;
static d_kqfilter_t     passkqfilter;
static void             passreadfiltdetach(struct knote *kn);
static int              passreadfilt(struct knote *kn, long hint);

static periph_init_t    passinit;
static periph_ctor_t    passregister;
static periph_oninv_t   passoninvalidate;
static periph_dtor_t    passcleanup;
static periph_start_t   passstart;
static void             pass_shutdown_kqueue(void *context, int pending);
static void             pass_add_physpath(void *context, int pending);
static void             passasync(void *callback_arg, uint32_t code,
                            struct cam_path *path, void *arg);
static void             passdone(struct cam_periph *periph,
                            union ccb *done_ccb);
static int              passcreatezone(struct cam_periph *periph);
static void             passiocleanup(struct pass_softc *softc,
                            struct pass_io_req *io_req);
static int              passcopysglist(struct cam_periph *periph,
                            struct pass_io_req *io_req, ccb_flags direction);
static int              passmemsetup(struct cam_periph *periph,
                            struct pass_io_req *io_req);
static int              passmemdone(struct cam_periph *periph,
                            struct pass_io_req *io_req);
static int              passerror(union ccb *ccb, uint32_t cam_flags,
                            uint32_t sense_flags);
static int              passsendccb(struct cam_periph *periph, union ccb *ccb,
                            union ccb *inccb);
static void             passflags(union ccb *ccb, uint32_t *cam_flags,
                            uint32_t *sense_flags);
static struct periph_driver passdriver =
{
        passinit, "pass",
        TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(pass, passdriver);
static struct cdevsw pass_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_TRACKCLOSE,
        .d_open =       passopen,
        .d_close =      passclose,
        .d_ioctl =      passioctl,
        .d_poll =       passpoll,
        .d_name =       "pass",
        .d_kqfilter =   passkqfilter,
};
static struct filterops passread_filtops = {
        .f_isfd = 1,
        .f_detach = passreadfiltdetach,
        .f_event = passreadfilt
};

static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");
static void
passinit(void)
{
        cam_status status;

        /*
         * Install a global async callback.  This callback will
         * receive async callbacks like "new device found".
         */
        status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL);

        if (status != CAM_REQ_CMP) {
                printf("pass: Failed to attach master async callback "
                    "due to status 0x%x!\n", status);
        }
}
static void
passrejectios(struct cam_periph *periph)
{
        struct pass_io_req *io_req, *io_req2;
        struct pass_softc *softc;

        softc = (struct pass_softc *)periph->softc;

        /*
         * The user can no longer get status for I/O on the done queue, so
         * clean up all outstanding I/O on the done queue.
         */
        TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
                TAILQ_REMOVE(&softc->done_queue, io_req, links);
                passiocleanup(softc, io_req);
                uma_zfree(softc->pass_zone, io_req);
        }

        /*
         * The underlying device is gone, so we can't issue these I/Os.
         * The devfs node has been shut down, so we can't return status to
         * the user.  Free any I/O left on the incoming queue.
         */
        TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
                TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
                passiocleanup(softc, io_req);
                uma_zfree(softc->pass_zone, io_req);
        }

        /*
         * Normally we would put I/Os on the abandoned queue and acquire a
         * reference when we saw the final close.  But, the device went
         * away and devfs may have moved everything off to deadfs by the
         * time the I/O done callback is called; as a result, we won't see
         * any more closes.  So, if we have any active I/Os, we need to put
         * them on the abandoned queue.  When the abandoned queue is empty,
         * we'll release the remaining reference (see below) to the peripheral.
         */
        TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
                TAILQ_REMOVE(&softc->active_queue, io_req, links);
                io_req->flags |= PASS_IO_ABANDONED;
                TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
        }

        /*
         * If we put any I/O on the abandoned queue, acquire a reference.
         */
        if ((!TAILQ_EMPTY(&softc->abandoned_queue))
         && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
                cam_periph_doacquire(periph);
                softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
        }
}
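
/*
 * Callback from devfs when the last reference to the device node goes
 * away: release the references held for any dangling opens, reject all
 * queued I/O, and hand the kqueue teardown off to a taskqueue thread.
 */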
static void
passdevgonecb(void *arg)
{
        struct cam_periph *periph;
        struct mtx *mtx;
        struct pass_softc *softc;
        int i;

        periph = (struct cam_periph *)arg;
        mtx = cam_periph_mtx(periph);
        mtx_lock(mtx);

        softc = (struct pass_softc *)periph->softc;
        KASSERT(softc->open_count >= 0, ("Negative open count %d",
            softc->open_count));

        /*
         * When we get this callback, we will get no more close calls from
         * devfs.  So if we have any dangling opens, we need to release the
         * reference held for that particular context.
         */
        for (i = 0; i < softc->open_count; i++)
                cam_periph_release_locked(periph);

        softc->open_count = 0;

        /*
         * Release the reference held for the device node, it is gone now.
         * Accordingly, inform all queued I/Os of their fate.
         */
        cam_periph_release_locked(periph);
        passrejectios(periph);

        /*
         * We reference the SIM lock directly here, instead of using
         * cam_periph_unlock().  The reason is that the final call to
         * cam_periph_release_locked() above could result in the periph
         * getting freed.  If that is the case, dereferencing the periph
         * with a cam_periph_unlock() call would cause a page fault.
         */
        mtx_unlock(mtx);

        /*
         * We have to remove our kqueue context from a thread because it
         * may sleep.  It would be nice if we could get a callback from
         * kqueue when it is done cleaning up resources.
         */
        taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
}
static void
passoninvalidate(struct cam_periph *periph)
{
        struct pass_softc *softc;

        softc = (struct pass_softc *)periph->softc;

        /*
         * De-register any async callbacks.
         */
        xpt_register_async(0, passasync, periph, periph->path);

        softc->flags |= PASS_FLAG_INVALID;

        /*
         * Tell devfs this device has gone away, and ask for a callback
         * when it has cleaned up its state.
         */
        destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
}
static void
passcleanup(struct cam_periph *periph)
{
        struct pass_softc *softc;

        softc = (struct pass_softc *)periph->softc;

        cam_periph_assert(periph, MA_OWNED);
        KASSERT(TAILQ_EMPTY(&softc->active_queue),
            ("%s called when there are commands on the active queue!\n",
            __func__));
        KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
            ("%s called when there are commands on the abandoned queue!\n",
            __func__));
        KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
            ("%s called when there are commands on the incoming queue!\n",
            __func__));
        KASSERT(TAILQ_EMPTY(&softc->done_queue),
            ("%s called when there are commands on the done queue!\n",
            __func__));

        devstat_remove_entry(softc->device_stats);

        cam_periph_unlock(periph);

        /*
         * We call taskqueue_drain() for the physpath task to make sure it
         * is complete.  We drop the lock because this can potentially
         * sleep.  XXX KDM that is bad.  Need a way to get a callback when
         * a taskqueue is drained.
         *
         * Note that we don't drain the kqueue shutdown task queue.  This
         * is because we hold a reference on the periph for kqueue, and
         * release that reference from the kqueue shutdown task queue.  So
         * we cannot come into this routine unless we've released that
         * reference.  Also, because that could be the last reference, we
         * could be called from the cam_periph_release() call in
         * pass_shutdown_kqueue().  In that case, the taskqueue_drain()
         * would deadlock.  It would be preferable if we had a way to
         * get a callback when a taskqueue is done.
         */
        taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);

        /*
         * It should be safe to destroy the zones from here, because all
         * of the references to this peripheral have been freed, and all
         * I/O has been terminated and freed.  We check the zones for NULL
         * because they may not have been allocated yet if the device went
         * away before any asynchronous I/O has been issued.
         */
        if (softc->pass_zone != NULL)
                uma_zdestroy(softc->pass_zone);
        if (softc->pass_io_zone != NULL)
                uma_zdestroy(softc->pass_io_zone);

        cam_periph_lock(periph);

        free(softc, M_DEVBUF);
}
static void
pass_shutdown_kqueue(void *context, int pending)
{
        struct cam_periph *periph;
        struct pass_softc *softc;

        periph = context;
        softc = periph->softc;

        knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
        knlist_destroy(&softc->read_select.si_note);

        /*
         * Release the reference we held for kqueue.
         */
        cam_periph_release(periph);
}
static void
pass_add_physpath(void *context, int pending)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        struct mtx *mtx;
        char *physpath;

        /*
         * If we have one, create a devfs alias for our
         * physical path.
         */
        periph = context;
        softc = periph->softc;
        physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
        mtx = cam_periph_mtx(periph);
        mtx_lock(mtx);

        if (periph->flags & CAM_PERIPH_INVALID)
                goto out;

        if (xpt_getattr(physpath, MAXPATHLEN,
            "GEOM::physpath", periph->path) == 0
         && strlen(physpath) != 0) {
                mtx_unlock(mtx);
                make_dev_physpath_alias(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME,
                    &softc->alias_dev, softc->dev,
                    softc->alias_dev, physpath);
                mtx_lock(mtx);
        }

out:
        /*
         * Now that we've made our alias, we no longer have to have a
         * reference to the device.
         */
        if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
                softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;

        /*
         * We always acquire a reference to the periph before queueing this
         * task queue function, so it won't go away before we run.
         */
        while (pending-- > 0)
                cam_periph_release_locked(periph);
        mtx_unlock(mtx);

        free(physpath, M_DEVBUF);
}
static void
passasync(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg)
{
        struct cam_periph *periph;

        periph = (struct cam_periph *)callback_arg;

        switch (code) {
        case AC_FOUND_DEVICE:
        {
                struct ccb_getdev *cgd;
                cam_status status;

                cgd = (struct ccb_getdev *)arg;
                if (cgd == NULL)
                        break;

                /*
                 * Allocate a peripheral instance for
                 * this device and start the probe
                 * process.
                 */
                status = cam_periph_alloc(passregister, passoninvalidate,
                    passcleanup, passstart, "pass",
                    CAM_PERIPH_BIO, path,
                    passasync, AC_FOUND_DEVICE, cgd);

                if (status != CAM_REQ_CMP
                 && status != CAM_REQ_INPROG) {
                        const struct cam_status_entry *entry;

                        entry = cam_fetch_status_entry(status);

                        printf("passasync: Unable to attach new device "
                            "due to status %#x: %s\n", status, entry ?
                            entry->status_text : "Unknown");
                }

                break;
        }
        case AC_ADVINFO_CHANGED:
        {
                uintptr_t buftype;

                buftype = (uintptr_t)arg;
                if (buftype == CDAI_TYPE_PHYS_PATH) {
                        struct pass_softc *softc;

                        softc = (struct pass_softc *)periph->softc;
                        /*
                         * Acquire a reference to the periph before we
                         * start the taskqueue, so that we don't run into
                         * a situation where the periph goes away before
                         * the task queue has a chance to run.
                         */
                        if (cam_periph_acquire(periph) != 0)
                                break;

                        taskqueue_enqueue(taskqueue_thread,
                            &softc->add_physpath_task);
                }
                break;
        }
        default:
                cam_periph_async(periph, code, path, arg);
                break;
        }
}
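
/*
 * Per-instance constructor: allocate and initialize the softc, create the
 * devstat(9) entry and the /dev/passN node, and register for the async
 * events we care about.
 */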
static cam_status
passregister(struct cam_periph *periph, void *arg)
{
        struct pass_softc *softc;
        struct ccb_getdev *cgd;
        struct ccb_pathinq cpi;
        struct make_dev_args args;
        int error, no_tags;

        cgd = (struct ccb_getdev *)arg;
        if (cgd == NULL) {
                printf("%s: no getdev CCB, can't register device\n", __func__);
                return (CAM_REQ_CMP_ERR);
        }

        softc = (struct pass_softc *)malloc(sizeof(*softc),
            M_DEVBUF, M_NOWAIT);

        if (softc == NULL) {
                printf("%s: Unable to probe new device. "
                    "Unable to allocate softc\n", __func__);
                return (CAM_REQ_CMP_ERR);
        }

        bzero(softc, sizeof(*softc));
        softc->state = PASS_STATE_NORMAL;
        if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI)
                softc->pd_type = SID_TYPE(&cgd->inq_data);
        else if (cgd->protocol == PROTO_SATAPM)
                softc->pd_type = T_ENCLOSURE;
        else
                softc->pd_type = T_DIRECT;

        periph->softc = softc;
        softc->periph = periph;
        TAILQ_INIT(&softc->incoming_queue);
        TAILQ_INIT(&softc->active_queue);
        TAILQ_INIT(&softc->abandoned_queue);
        TAILQ_INIT(&softc->done_queue);
        snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d",
            periph->periph_name, periph->unit_number);
        snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO",
            periph->periph_name, periph->unit_number);
        softc->io_zone_size = maxphys;
        knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph));

        xpt_path_inq(&cpi, periph->path);

        if (cpi.maxio == 0)
                softc->maxio = DFLTPHYS;        /* traditional default */
        else if (cpi.maxio > maxphys)
                softc->maxio = maxphys;         /* for safety */
        else
                softc->maxio = cpi.maxio;       /* real value */

        if (cpi.hba_misc & PIM_UNMAPPED)
                softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE;

        /*
         * We pass in 0 for a blocksize, since we don't
         * know what the blocksize of this device is, if
         * it even has a blocksize.
         */
        cam_periph_unlock(periph);
        no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0;
        softc->device_stats = devstat_new_entry("pass",
            periph->unit_number, 0,
            DEVSTAT_NO_BLOCKSIZE
            | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0),
            softc->pd_type |
            XPORT_DEVSTAT_TYPE(cpi.transport) |
            DEVSTAT_TYPE_PASS,
            DEVSTAT_PRIORITY_PASS);

        /*
         * Initialize the taskqueue handler for shutting down kqueue.
         */
        TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0,
            pass_shutdown_kqueue, periph);

        /*
         * Acquire a reference to the periph that we can release once we've
         * cleaned up the kqueue.
         */
        if (cam_periph_acquire(periph) != 0) {
                xpt_print(periph->path, "%s: lost periph during "
                    "registration!\n", __func__);
                cam_periph_lock(periph);
                return (CAM_REQ_CMP_ERR);
        }

        /*
         * Acquire a reference to the periph before we create the devfs
         * instance for it.  We'll release this reference once the devfs
         * instance has been freed.
         */
        if (cam_periph_acquire(periph) != 0) {
                xpt_print(periph->path, "%s: lost periph during "
                    "registration!\n", __func__);
                cam_periph_lock(periph);
                return (CAM_REQ_CMP_ERR);
        }

        /* Register the device */
        make_dev_args_init(&args);
        args.mda_devsw = &pass_cdevsw;
        args.mda_unit = periph->unit_number;
        args.mda_uid = UID_ROOT;
        args.mda_gid = GID_OPERATOR;
        args.mda_mode = 0600;
        args.mda_si_drv1 = periph;
        args.mda_flags = MAKEDEV_NOWAIT;
        error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name,
            periph->unit_number);
        if (error != 0) {
                cam_periph_lock(periph);
                cam_periph_release_locked(periph);
                return (CAM_REQ_CMP_ERR);
        }

        /*
         * Hold a reference to the periph before we create the physical
         * path alias so it can't go away.
         */
        if (cam_periph_acquire(periph) != 0) {
                xpt_print(periph->path, "%s: lost periph during "
                    "registration!\n", __func__);
                cam_periph_lock(periph);
                return (CAM_REQ_CMP_ERR);
        }

        cam_periph_lock(periph);

        TASK_INIT(&softc->add_physpath_task, /*priority*/0,
            pass_add_physpath, periph);

        /*
         * See if physical path information is already available.
         */
        taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task);

        /*
         * Add an async callback so that we get notified if
         * this device goes away or its physical path
         * (stored in the advanced info data of the EDT) has
         * changed.
         */
        xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED,
            passasync, periph, periph->path);

        if (bootverbose)
                xpt_announce_periph(periph, NULL);

        return (CAM_REQ_CMP);
}
static int
passopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        int error;

        periph = (struct cam_periph *)dev->si_drv1;
        if (cam_periph_acquire(periph) != 0)
                return (ENXIO);

        cam_periph_lock(periph);

        softc = (struct pass_softc *)periph->softc;

        if (softc->flags & PASS_FLAG_INVALID) {
                cam_periph_release_locked(periph);
                cam_periph_unlock(periph);
                return (ENXIO);
        }

        /*
         * Don't allow access when we're running at a high securelevel.
         */
        error = securelevel_gt(td->td_ucred, 1);
        if (error) {
                cam_periph_release_locked(periph);
                cam_periph_unlock(periph);
                return (error);
        }

        /*
         * Only allow read-write access.
         */
        if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) {
                cam_periph_release_locked(periph);
                cam_periph_unlock(periph);
                return (EPERM);
        }

        /*
         * We don't allow nonblocking access.
         */
        if ((flags & O_NONBLOCK) != 0) {
                xpt_print(periph->path, "can't do nonblocking access\n");
                cam_periph_release_locked(periph);
                cam_periph_unlock(periph);
                return (EINVAL);
        }

        softc->open_count++;

        cam_periph_unlock(periph);

        return (error);
}
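
/*
 * On the final close, flush the done and incoming queues and mark any
 * still-active requests abandoned so that their completions are freed
 * rather than delivered to a future opener.
 */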
static int
passclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        struct mtx *mtx;

        periph = (struct cam_periph *)dev->si_drv1;
        mtx = cam_periph_mtx(periph);
        mtx_lock(mtx);

        softc = periph->softc;
        softc->open_count--;

        if (softc->open_count == 0) {
                struct pass_io_req *io_req, *io_req2;

                TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
                        TAILQ_REMOVE(&softc->done_queue, io_req, links);
                        passiocleanup(softc, io_req);
                        uma_zfree(softc->pass_zone, io_req);
                }

                TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
                    io_req2) {
                        TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
                        passiocleanup(softc, io_req);
                        uma_zfree(softc->pass_zone, io_req);
                }

                /*
                 * If there are any active I/Os, we need to forcibly acquire a
                 * reference to the peripheral so that we don't go away
                 * before they complete.  We'll release the reference when
                 * the abandoned queue is empty.
                 */
                io_req = TAILQ_FIRST(&softc->active_queue);
                if ((io_req != NULL)
                 && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) {
                        cam_periph_doacquire(periph);
                        softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
                }

                /*
                 * Since the I/O in the active queue is not under our
                 * control, just set a flag so that we can clean it up when
                 * it completes and put it on the abandoned queue.  This
                 * will prevent our sending spurious completions in the
                 * event that the device is opened again before these I/Os
                 * complete.
                 */
                TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
                    io_req2) {
                        TAILQ_REMOVE(&softc->active_queue, io_req, links);
                        io_req->flags |= PASS_IO_ABANDONED;
                        TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
                            links);
                }
        }

        cam_periph_release_locked(periph);

        /*
         * We reference the lock directly here, instead of using
         * cam_periph_unlock().  The reason is that the call to
         * cam_periph_release_locked() above could result in the periph
         * getting freed.  If that is the case, dereferencing the periph
         * with a cam_periph_unlock() call would cause a page fault.
         *
         * cam_periph_release() avoids this problem using the same method,
         * but we're manually acquiring and dropping the lock here to
         * protect the open count and avoid another lock acquisition and
         * release.
         */
        mtx_unlock(mtx);

        return (0);
}
static void
passstart(struct cam_periph *periph, union ccb *start_ccb)
{
        struct pass_softc *softc;

        softc = (struct pass_softc *)periph->softc;

        switch (softc->state) {
        case PASS_STATE_NORMAL: {
                struct pass_io_req *io_req;

                /*
                 * Check for any queued I/O requests that require an
                 * allocated slot.
                 */
                io_req = TAILQ_FIRST(&softc->incoming_queue);
                if (io_req == NULL) {
                        xpt_release_ccb(start_ccb);
                        break;
                }
                TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
                TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);

                /*
                 * Merge the user's CCB into the allocated CCB.
                 */
                xpt_merge_ccb(start_ccb, &io_req->ccb);
                start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO;
                start_ccb->ccb_h.ccb_ioreq = io_req;
                start_ccb->ccb_h.cbfcnp = passdone;
                io_req->alloced_ccb = start_ccb;
                binuptime(&io_req->start_time);
                devstat_start_transaction(softc->device_stats,
                    &io_req->start_time);

                xpt_action(start_ccb);

                /*
                 * If we have any more I/O waiting, schedule ourselves again.
                 */
                if (!TAILQ_EMPTY(&softc->incoming_queue))
                        xpt_schedule(periph, CAM_PRIORITY_NORMAL);
                break;
        }
        default:
                break;
        }
}
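
/*
 * Completion handler for queued (CAMIOQUEUE) I/O: run error recovery if
 * it was requested, record the transaction with devstat(9), and move the
 * request to the done queue, waking up any poll(2)/kevent(2) waiters.
 */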
static void
passdone(struct cam_periph *periph, union ccb *done_ccb)
{
        struct pass_softc *softc;
        struct ccb_scsiio *csio;

        softc = (struct pass_softc *)periph->softc;

        cam_periph_assert(periph, MA_OWNED);

        csio = &done_ccb->csio;
        switch (csio->ccb_h.ccb_type) {
        case PASS_CCB_QUEUED_IO: {
                struct pass_io_req *io_req;

                io_req = done_ccb->ccb_h.ccb_ioreq;
#if 0
                xpt_print(periph->path, "%s: called for user CCB %p\n",
                    __func__, io_req->user_ccb_ptr);
#endif
                if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) &&
                    ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
                        int error;
                        uint32_t cam_flags, sense_flags;

                        passflags(done_ccb, &cam_flags, &sense_flags);
                        error = passerror(done_ccb, cam_flags, sense_flags);

                        if (error == ERESTART) {
                                KASSERT(((sense_flags & SF_NO_RETRY) == 0),
                                    ("passerror returned ERESTART with no retry requested\n"));
                                return;
                        }
                }

                /*
                 * Copy the allocated CCB contents back to the malloced CCB
                 * so we can give status back to the user when he requests it.
                 */
                bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));

                /*
                 * Log data/transaction completion with devstat(9).
                 */
                switch (done_ccb->ccb_h.func_code) {
                case XPT_SCSI_IO:
                        devstat_end_transaction(softc->device_stats,
                            done_ccb->csio.dxfer_len - done_ccb->csio.resid,
                            done_ccb->csio.tag_action & 0x3,
                            ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
                            CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
                            (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
                            DEVSTAT_WRITE : DEVSTAT_READ, NULL,
                            &io_req->start_time);
                        break;
                case XPT_ATA_IO:
                        devstat_end_transaction(softc->device_stats,
                            done_ccb->ataio.dxfer_len - done_ccb->ataio.resid,
                            0, /* Not used in ATA */
                            ((done_ccb->ccb_h.flags & CAM_DIR_MASK) ==
                            CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
                            (done_ccb->ccb_h.flags & CAM_DIR_OUT) ?
                            DEVSTAT_WRITE : DEVSTAT_READ, NULL,
                            &io_req->start_time);
                        break;
                case XPT_SMP_IO:
                        /*
                         * XXX KDM this isn't quite right, but there isn't
                         * currently an easy way to represent a bidirectional
                         * transfer in devstat.  The only way to do it
                         * and have the byte counts come out right would
                         * mean that we would have to record two
                         * transactions, one for the request and one for the
                         * response.  For now, so that we report something,
                         * just treat the entire thing as a read.
                         */
                        devstat_end_transaction(softc->device_stats,
                            done_ccb->smpio.smp_request_len +
                            done_ccb->smpio.smp_response_len,
                            DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL,
                            &io_req->start_time);
                        break;
                default:
                        devstat_end_transaction(softc->device_stats, 0,
                            DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL,
                            &io_req->start_time);
                        break;
                }

                /*
                 * In the normal case, take the completed I/O off of the
                 * active queue and put it on the done queue.  Notify the
                 * user that we have a completed I/O.
                 */
                if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
                        TAILQ_REMOVE(&softc->active_queue, io_req, links);
                        TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
                        selwakeuppri(&softc->read_select, PRIBIO);
                        KNOTE_LOCKED(&softc->read_select.si_note, 0);
                } else {
                        /*
                         * In the case of an abandoned I/O (final close
                         * without fetching the I/O), take it off of the
                         * abandoned queue and free it.
                         */
                        TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
                        passiocleanup(softc, io_req);
                        uma_zfree(softc->pass_zone, io_req);

                        /*
                         * Release the done_ccb here, since we may wind up
                         * freeing the peripheral when we decrement the
                         * reference count below.
                         */
                        xpt_release_ccb(done_ccb);

                        /*
                         * If the abandoned queue is empty, we can release
                         * our reference to the periph since we won't have
                         * any more completions coming.
                         */
                        if ((TAILQ_EMPTY(&softc->abandoned_queue))
                         && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) {
                                softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET;
                                cam_periph_release_locked(periph);
                        }

                        /*
                         * We have already released the CCB, so we can
                         * return.
                         */
                        return;
                }
                break;
        }
        default:
                break;
        }
        xpt_release_ccb(done_ccb);
}
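
/*
 * Lazily create the per-instance UMA zones used by the asynchronous API:
 * pass_zone holds struct pass_io_req, and pass_io_zone holds io_zone_size
 * (normally maxphys) data buffers for kernel S/G segments.  The
 * ZONE_INPROG/ZONE_VALID flag pair lets concurrent callers sleep until
 * the first caller either finishes or fails zone creation.
 */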
static int
passcreatezone(struct cam_periph *periph)
{
        struct pass_softc *softc;
        int error;

        error = 0;
        softc = (struct pass_softc *)periph->softc;

        cam_periph_assert(periph, MA_OWNED);
        KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0),
            ("%s called when the pass(4) zone is valid!\n", __func__));
        KASSERT((softc->pass_zone == NULL),
            ("%s called when the pass(4) zone is allocated!\n", __func__));

        if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) {
                /*
                 * We're the first context through, so we need to create
                 * the pass(4) UMA zone for I/O requests.
                 */
                softc->flags |= PASS_FLAG_ZONE_INPROG;

                /*
                 * uma_zcreate() does a blocking (M_WAITOK) allocation,
                 * so we cannot hold a mutex while we call it.
                 */
                cam_periph_unlock(periph);

                softc->pass_zone = uma_zcreate(softc->zone_name,
                    sizeof(struct pass_io_req), NULL, NULL, NULL, NULL,
                    /*align*/ 0, /*flags*/ 0);

                softc->pass_io_zone = uma_zcreate(softc->io_zone_name,
                    softc->io_zone_size, NULL, NULL, NULL, NULL,
                    /*align*/ 0, /*flags*/ 0);

                cam_periph_lock(periph);

                if ((softc->pass_zone == NULL)
                 || (softc->pass_io_zone == NULL)) {
                        if (softc->pass_zone == NULL)
                                xpt_print(periph->path, "unable to allocate "
                                    "IO Req UMA zone\n");
                        else
                                xpt_print(periph->path, "unable to allocate "
                                    "IO UMA zone\n");
                        softc->flags &= ~PASS_FLAG_ZONE_INPROG;
                        error = ENOMEM;
                        goto bailout;
                }

                /*
                 * Set the flags appropriately and notify any other waiters.
                 */
                softc->flags &= ~PASS_FLAG_ZONE_INPROG;
                softc->flags |= PASS_FLAG_ZONE_VALID;
                wakeup(&softc->pass_zone);
        } else {
                /*
                 * In this case, the UMA zone has not yet been created, but
                 * another context is in the process of creating it.  We
                 * need to sleep until the creation is either done or has
                 * failed.
                 */
                while ((softc->flags & PASS_FLAG_ZONE_INPROG)
                    && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) {
                        error = msleep(&softc->pass_zone,
                            cam_periph_mtx(periph), PRIBIO,
                            "paszon", 0);
                        if (error != 0)
                                goto bailout;
                }
                /*
                 * If the zone creation failed, no luck for the user.
                 */
                if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
                        error = ENOMEM;
                        goto bailout;
                }
        }
bailout:
        return (error);
}
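
/*
 * Free any bounce buffers and kernel S/G segments attached to a request
 * and restore the CCB's original user-space data pointers.
 */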
static void
passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
{
        union ccb *ccb;
        uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
        int i, numbufs;

        ccb = &io_req->ccb;

        switch (ccb->ccb_h.func_code) {
        case XPT_DEV_MATCH:
                numbufs = min(io_req->num_bufs, 2);

                if (numbufs == 1) {
                        data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
                } else {
                        data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
                        data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
                }
                break;
        case XPT_SCSI_IO:
        case XPT_CONT_TARGET_IO:
                data_ptrs[0] = &ccb->csio.data_ptr;
                numbufs = min(io_req->num_bufs, 1);
                break;
        case XPT_ATA_IO:
                data_ptrs[0] = &ccb->ataio.data_ptr;
                numbufs = min(io_req->num_bufs, 1);
                break;
        case XPT_SMP_IO:
                numbufs = min(io_req->num_bufs, 2);
                data_ptrs[0] = &ccb->smpio.smp_request;
                data_ptrs[1] = &ccb->smpio.smp_response;
                break;
        case XPT_DEV_ADVINFO:
                numbufs = min(io_req->num_bufs, 1);
                data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
                break;
        case XPT_NVME_IO:
        case XPT_NVME_ADMIN:
                data_ptrs[0] = &ccb->nvmeio.data_ptr;
                numbufs = min(io_req->num_bufs, 1);
                break;
        default:
                /* allow ourselves to be swapped once again */
                return;
                break; /* NOTREACHED */
        }

        if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
                free(io_req->user_segptr, M_SCSIPASS);
                io_req->user_segptr = NULL;
        }

        /*
         * We only want to free memory we malloced.
         */
        if (io_req->data_flags == CAM_DATA_VADDR) {
                for (i = 0; i < io_req->num_bufs; i++) {
                        if (io_req->kern_bufs[i] == NULL)
                                continue;

                        free(io_req->kern_bufs[i], M_SCSIPASS);
                        io_req->kern_bufs[i] = NULL;
                }
        } else if (io_req->data_flags == CAM_DATA_SG) {
                for (i = 0; i < io_req->num_kern_segs; i++) {
                        if ((uint8_t *)(uintptr_t)
                            io_req->kern_segptr[i].ds_addr == NULL)
                                continue;

                        uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t)
                            io_req->kern_segptr[i].ds_addr);
                        io_req->kern_segptr[i].ds_addr = 0;
                }
        }

        if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
                free(io_req->kern_segptr, M_SCSIPASS);
                io_req->kern_segptr = NULL;
        }

        if (io_req->data_flags != CAM_DATA_PADDR) {
                for (i = 0; i < numbufs; i++) {
                        /*
                         * Restore the user's buffer pointers to their
                         * previous values.
                         */
                        if (io_req->user_bufs[i] != NULL)
                                *data_ptrs[i] = io_req->user_bufs[i];
                }
        }
}
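
/*
 * Copy data between the user and kernel S/G lists, which may have
 * different segment geometries.  The two watermarks track the offset into
 * the current user and kernel segments; each iteration copies the largest
 * run that fits in both segments, then advances whichever segment(s) have
 * been exhausted.
 */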
static int
passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
    ccb_flags direction)
{
        bus_size_t kern_watermark, user_watermark, len_to_copy;
        bus_dma_segment_t *user_sglist, *kern_sglist;
        int i, j, error;

        error = 0;
        kern_watermark = 0;
        user_watermark = 0;
        len_to_copy = 0;
        user_sglist = io_req->user_segptr;
        kern_sglist = io_req->kern_segptr;

        for (i = 0, j = 0; i < io_req->num_user_segs &&
             j < io_req->num_kern_segs;) {
                uint8_t *user_ptr, *kern_ptr;

                len_to_copy = min(user_sglist[i].ds_len - user_watermark,
                    kern_sglist[j].ds_len - kern_watermark);

                user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr;
                user_ptr = user_ptr + user_watermark;
                kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr;
                kern_ptr = kern_ptr + kern_watermark;

                user_watermark += len_to_copy;
                kern_watermark += len_to_copy;

                if (direction == CAM_DIR_IN) {
                        error = copyout(kern_ptr, user_ptr, len_to_copy);
                        if (error != 0) {
                                xpt_print(periph->path, "%s: copyout of %u "
                                    "bytes from %p to %p failed with "
                                    "error %d\n", __func__, len_to_copy,
                                    kern_ptr, user_ptr, error);
                                goto bailout;
                        }
                } else {
                        error = copyin(user_ptr, kern_ptr, len_to_copy);
                        if (error != 0) {
                                xpt_print(periph->path, "%s: copyin of %u "
                                    "bytes from %p to %p failed with "
                                    "error %d\n", __func__, len_to_copy,
                                    user_ptr, kern_ptr, error);
                                goto bailout;
                        }
                }

                if (user_sglist[i].ds_len == user_watermark) {
                        i++;
                        user_watermark = 0;
                }

                if (kern_sglist[j].ds_len == kern_watermark) {
                        j++;
                        kern_watermark = 0;
                }
        }

bailout:
        return (error);
}
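
/*
 * Set up the data phase for a queued CCB: validate lengths and
 * directions, then either bounce-buffer the user's virtual addresses
 * (CAM_DATA_VADDR), pass physical addresses through untouched
 * (CAM_DATA_PADDR), or build a kernel S/G list backed by pass_io_zone
 * allocations (CAM_DATA_SG / CAM_DATA_SG_PADDR).
 */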
static int
passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
{
        union ccb *ccb;
        struct pass_softc *softc;
        int numbufs, i;
        uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
        uint32_t lengths[CAM_PERIPH_MAXMAPS];
        uint32_t dirs[CAM_PERIPH_MAXMAPS];
        uint32_t num_segs;
        uint16_t *seg_cnt_ptr;
        uint32_t maxmap;
        int error;

        cam_periph_assert(periph, MA_NOTOWNED);

        softc = periph->softc;

        error = 0;
        ccb = &io_req->ccb;
        maxmap = 0;
        num_segs = 0;
        seg_cnt_ptr = NULL;

        switch (ccb->ccb_h.func_code) {
        case XPT_DEV_MATCH:
                if (ccb->cdm.match_buf_len == 0) {
                        printf("%s: invalid match buffer length 0\n", __func__);
                        return (EINVAL);
                }
                if (ccb->cdm.pattern_buf_len > 0) {
                        data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
                        lengths[0] = ccb->cdm.pattern_buf_len;
                        dirs[0] = CAM_DIR_OUT;
                        data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
                        lengths[1] = ccb->cdm.match_buf_len;
                        dirs[1] = CAM_DIR_IN;
                        numbufs = 2;
                } else {
                        data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
                        lengths[0] = ccb->cdm.match_buf_len;
                        dirs[0] = CAM_DIR_IN;
                        numbufs = 1;
                }
                io_req->data_flags = CAM_DATA_VADDR;
                break;
        case XPT_SCSI_IO:
        case XPT_CONT_TARGET_IO:
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
                        return (0);

                /*
                 * The user shouldn't be able to supply a bio.
                 */
                if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
                        return (EINVAL);

                io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;

                data_ptrs[0] = &ccb->csio.data_ptr;
                lengths[0] = ccb->csio.dxfer_len;
                dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
                num_segs = ccb->csio.sglist_cnt;
                seg_cnt_ptr = &ccb->csio.sglist_cnt;
                numbufs = 1;
                maxmap = softc->maxio;
                break;
        case XPT_ATA_IO:
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
                        return (0);

                /*
                 * We only support a single virtual address for ATA I/O.
                 */
                if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
                        return (EINVAL);

                io_req->data_flags = CAM_DATA_VADDR;

                data_ptrs[0] = &ccb->ataio.data_ptr;
                lengths[0] = ccb->ataio.dxfer_len;
                dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
                numbufs = 1;
                maxmap = softc->maxio;
                break;
        case XPT_SMP_IO:
                io_req->data_flags = CAM_DATA_VADDR;

                data_ptrs[0] = &ccb->smpio.smp_request;
                lengths[0] = ccb->smpio.smp_request_len;
                dirs[0] = CAM_DIR_OUT;
                data_ptrs[1] = &ccb->smpio.smp_response;
                lengths[1] = ccb->smpio.smp_response_len;
                dirs[1] = CAM_DIR_IN;
                numbufs = 2;
                maxmap = softc->maxio;
                break;
        case XPT_DEV_ADVINFO:
                if (ccb->cdai.bufsiz == 0)
                        return (0);

                io_req->data_flags = CAM_DATA_VADDR;

                data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
                lengths[0] = ccb->cdai.bufsiz;
                dirs[0] = CAM_DIR_IN;
                numbufs = 1;
                break;
        case XPT_NVME_ADMIN:
        case XPT_NVME_IO:
                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
                        return (0);

                io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;

                data_ptrs[0] = &ccb->nvmeio.data_ptr;
                lengths[0] = ccb->nvmeio.dxfer_len;
                dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
                num_segs = ccb->nvmeio.sglist_cnt;
                seg_cnt_ptr = &ccb->nvmeio.sglist_cnt;
                numbufs = 1;
                maxmap = softc->maxio;
                break;
        default:
                return (EINVAL);
                break; /* NOTREACHED */
        }

        io_req->num_bufs = numbufs;

        /*
         * If there is a maximum, check to make sure that the user's
         * request fits within the limit.  In general, we should only have
         * a maximum length for requests that go to hardware.  Otherwise it
         * is whatever we're able to malloc.
         */
        for (i = 0; i < numbufs; i++) {
                io_req->user_bufs[i] = *data_ptrs[i];
                io_req->dirs[i] = dirs[i];
                io_req->lengths[i] = lengths[i];

                if (maxmap == 0)
                        continue;

                if (lengths[i] <= maxmap)
                        continue;

                xpt_print(periph->path, "%s: data length %u > max allowed %u "
                    "bytes\n", __func__, lengths[i], maxmap);
                error = EINVAL;
                goto bailout;
        }

        switch (io_req->data_flags) {
        case CAM_DATA_VADDR:
                /* Map or copy the buffer into kernel address space */
                for (i = 0; i < numbufs; i++) {
                        uint8_t *tmp_buf;

                        /*
                         * If for some reason no length is specified, we
                         * don't need to allocate anything.
                         */
                        if (io_req->lengths[i] == 0)
                                continue;

                        tmp_buf = malloc(lengths[i], M_SCSIPASS,
                            M_WAITOK | M_ZERO);
                        io_req->kern_bufs[i] = tmp_buf;
                        *data_ptrs[i] = tmp_buf;

#if 0
                        xpt_print(periph->path, "%s: malloced %p len %u, user "
                            "buffer %p, operation: %s\n", __func__,
                            tmp_buf, lengths[i], io_req->user_bufs[i],
                            (dirs[i] == CAM_DIR_IN) ? "read" : "write");
#endif
                        /*
                         * We only need to copy in if the user is writing.
                         */
                        if (dirs[i] != CAM_DIR_OUT)
                                continue;

                        error = copyin(io_req->user_bufs[i],
                            io_req->kern_bufs[i], lengths[i]);
                        if (error != 0) {
                                xpt_print(periph->path, "%s: copy of user "
                                    "buffer from %p to %p failed with "
                                    "error %d\n", __func__,
                                    io_req->user_bufs[i],
                                    io_req->kern_bufs[i], error);
                                goto bailout;
                        }
                }
                break;
        case CAM_DATA_PADDR:
                /* Pass down the pointer as-is */
                break;
        case CAM_DATA_SG: {
                size_t sg_length, size_to_go, alloc_size;
                uint32_t num_segs_needed;

                /*
                 * Copy the user S/G list in, and then copy in the
                 * individual segments.
                 */
                /*
                 * We shouldn't see this, but check just in case.
                 */
                if (numbufs != 1) {
                        xpt_print(periph->path, "%s: cannot currently handle "
                            "more than one S/G list per CCB\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /*
                 * We have to have at least one segment.
                 */
                if (num_segs == 0) {
                        xpt_print(periph->path, "%s: CAM_DATA_SG flag set, "
                            "but sglist_cnt=0!\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /*
                 * Make sure the user specified the total length and didn't
                 * just leave it to us to decode the S/G list.
                 */
                if (lengths[0] == 0) {
                        xpt_print(periph->path, "%s: no dxfer_len specified, "
                            "but CAM_DATA_SG flag is set!\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /*
                 * We allocate buffers in io_zone_size increments for an
                 * S/G list.  This will generally be maxphys.
                 */
                if (lengths[0] <= softc->io_zone_size)
                        num_segs_needed = 1;
                else {
                        num_segs_needed = lengths[0] / softc->io_zone_size;
                        if ((lengths[0] % softc->io_zone_size) != 0)
                                num_segs_needed++;
                }

                /* Figure out the size of the S/G list */
                sg_length = num_segs * sizeof(bus_dma_segment_t);
                io_req->num_user_segs = num_segs;
                io_req->num_kern_segs = num_segs_needed;

                /* Save the user's S/G list pointer for later restoration */
                io_req->user_bufs[0] = *data_ptrs[0];

                /*
                 * If we have enough segments allocated by default to handle
                 * the length of the user's S/G list, use the preallocated
                 * array; otherwise, allocate one.
                 */
                if (num_segs > PASS_MAX_SEGS) {
                        io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
                            num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
                        io_req->flags |= PASS_IO_USER_SEG_MALLOC;
                } else
                        io_req->user_segptr = io_req->user_segs;

                error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
                if (error != 0) {
                        xpt_print(periph->path, "%s: copy of user S/G list "
                            "from %p to %p failed with error %d\n",
                            __func__, *data_ptrs[0], io_req->user_segptr,
                            error);
                        goto bailout;
                }

                if (num_segs_needed > PASS_MAX_SEGS) {
                        io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
                            num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO);
                        io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
                } else {
                        io_req->kern_segptr = io_req->kern_segs;
                }

                /*
                 * Allocate the kernel S/G list.
                 */
                for (size_to_go = lengths[0], i = 0;
                     size_to_go > 0 && i < num_segs_needed;
                     i++, size_to_go -= alloc_size) {
                        uint8_t *kern_ptr;

                        alloc_size = min(size_to_go, softc->io_zone_size);
                        kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK);
                        io_req->kern_segptr[i].ds_addr =
                            (bus_addr_t)(uintptr_t)kern_ptr;
                        io_req->kern_segptr[i].ds_len = alloc_size;
                }
                if (size_to_go > 0) {
                        printf("%s: size_to_go = %zu, software error!\n",
                            __func__, size_to_go);
                        error = EINVAL;
                        goto bailout;
                }

                *data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
                *seg_cnt_ptr = io_req->num_kern_segs;

                /*
                 * We only need to copy data here if the user is writing.
                 */
                if (dirs[0] == CAM_DIR_OUT)
                        error = passcopysglist(periph, io_req, dirs[0]);
                break;
        }
        case CAM_DATA_SG_PADDR: {
                size_t sg_length;

                /*
                 * We shouldn't see this, but check just in case.
                 */
                if (numbufs != 1) {
                        printf("%s: cannot currently handle more than one "
                            "S/G list per CCB\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /*
                 * We have to have at least one segment.
                 */
                if (num_segs == 0) {
                        xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag "
                            "set, but sglist_cnt=0!\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /*
                 * Make sure the user specified the total length and didn't
                 * just leave it to us to decode the S/G list.
                 */
                if (lengths[0] == 0) {
                        xpt_print(periph->path, "%s: no dxfer_len specified, "
                            "but CAM_DATA_SG flag is set!\n", __func__);
                        error = EINVAL;
                        goto bailout;
                }

                /* Figure out the size of the S/G list */
                sg_length = num_segs * sizeof(bus_dma_segment_t);
                io_req->num_user_segs = num_segs;
                io_req->num_kern_segs = io_req->num_user_segs;

                /* Save the user's S/G list pointer for later restoration */
                io_req->user_bufs[0] = *data_ptrs[0];

                if (num_segs > PASS_MAX_SEGS) {
                        io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
                            num_segs, M_SCSIPASS, M_WAITOK | M_ZERO);
                        io_req->flags |= PASS_IO_USER_SEG_MALLOC;
                } else
                        io_req->user_segptr = io_req->user_segs;

                io_req->kern_segptr = io_req->user_segptr;

                error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
                if (error != 0) {
                        xpt_print(periph->path, "%s: copy of user S/G list "
                            "from %p to %p failed with error %d\n",
                            __func__, *data_ptrs[0], io_req->user_segptr,
                            error);
                        goto bailout;
                }
                break;
        }
        default:
        case CAM_DATA_BIO:
                /*
                 * A user shouldn't be attaching a bio to the CCB.  It
                 * isn't a user-accessible structure.
                 */
                error = EINVAL;
                break;
        }

bailout:
        if (error != 0)
                passiocleanup(softc, io_req);

        return (error);
}
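
/*
 * Undo passmemsetup() once the I/O has completed: copy any read data back
 * out to the user's buffers and restore the original user pointers in the
 * CCB.
 */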
static int
passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
{
        struct pass_softc *softc;
        int error;
        int i;

        error = 0;
        softc = (struct pass_softc *)periph->softc;

        switch (io_req->data_flags) {
        case CAM_DATA_VADDR:
                /*
                 * Copy back to the user buffer if this was a read.
                 */
                for (i = 0; i < io_req->num_bufs; i++) {
                        if (io_req->dirs[i] != CAM_DIR_IN)
                                continue;

                        error = copyout(io_req->kern_bufs[i],
                            io_req->user_bufs[i], io_req->lengths[i]);
                        if (error != 0) {
                                xpt_print(periph->path, "Unable to copy %u "
                                    "bytes from %p to user address %p\n",
                                    io_req->lengths[i],
                                    io_req->kern_bufs[i],
                                    io_req->user_bufs[i]);
                                goto bailout;
                        }
                }
                break;
        case CAM_DATA_PADDR:
                /* Do nothing.  The pointer is a physical address already */
                break;
        case CAM_DATA_SG:
                /*
                 * Copy back to the user buffer if this was a read.
                 * Restore the user's S/G list buffer pointer.
                 */
                if (io_req->dirs[0] == CAM_DIR_IN)
                        error = passcopysglist(periph, io_req, io_req->dirs[0]);
                break;
        case CAM_DATA_SG_PADDR:
                /*
                 * Restore the user's S/G list buffer pointer.  No need to
                 * copy.
                 */
                break;
        default:
        case CAM_DATA_BIO:
                error = EINVAL;
                break;
        }

bailout:
        /*
         * Reset the user's pointers to their original values and free
         * allocated memory.
         */
        passiocleanup(softc, io_req);

        return (error);
}
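
/*
 * The ioctl handlers below implement both the classic synchronous
 * CAMIOCOMMAND entry point and the asynchronous CAMIOQUEUE/CAMIOGET
 * pair.  A minimal userland sketch of the asynchronous flow, assuming
 * the CCB itself has already been built (e.g. with cam_getccb(3) and the
 * libcam fill routines); error handling is elided:
 *
 *      union ccb *ccb = ...;                   // caller-owned CCB
 *      ioctl(fd, CAMIOQUEUE, &ccb);            // kernel copies in *ccb
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      poll(&pfd, 1, INFTIM);                  // wait for completion
 *      ioctl(fd, CAMIOGET, &ccb);              // kernel copies result back
 */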
static int
passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        int error;

        if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
                error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl);
        }
        return (error);
}
static int
passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        int error;
        uint32_t priority;

        periph = (struct cam_periph *)dev->si_drv1;
        cam_periph_lock(periph);
        softc = (struct pass_softc *)periph->softc;

        error = 0;

        switch (cmd) {
        case CAMIOCOMMAND:
        {
                union ccb *inccb;
                union ccb *ccb;
                int ccb_malloced;

                inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (inccb->ccb_h.func_code == XPT_SCSI_IO)
                        inccb->csio.bio = NULL;
#endif

                if (inccb->ccb_h.flags & CAM_UNLOCKED) {
                        error = EINVAL;
                        break;
                }

                /*
                 * Some CCB types, like scan bus and scan lun can only go
                 * through the transport layer device.
                 */
                if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
                        xpt_print(periph->path, "CCB function code %#x is "
                            "restricted to the XPT device\n",
                            inccb->ccb_h.func_code);
                        error = ENODEV;
                        break;
                }

                /* Compatibility for RL/priority-unaware code. */
                priority = inccb->ccb_h.pinfo.priority;
                if (priority <= CAM_PRIORITY_OOB)
                        priority += CAM_PRIORITY_OOB + 1;

                /*
                 * Non-immediate CCBs need a CCB from the per-device pool
                 * of CCBs, which is scheduled by the transport layer.
                 * Immediate CCBs and user-supplied CCBs should just be
                 * malloced.
                 */
                if ((inccb->ccb_h.func_code & XPT_FC_QUEUED)
                 && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) {
                        ccb = cam_periph_getccb(periph, priority);
                        ccb_malloced = 0;
                } else {
                        ccb = xpt_alloc_ccb_nowait();

                        if (ccb != NULL)
                                xpt_setup_ccb(&ccb->ccb_h, periph->path,
                                    priority);
                        ccb_malloced = 1;
                }

                if (ccb == NULL) {
                        xpt_print(periph->path, "unable to allocate CCB\n");
                        error = ENOMEM;
                        break;
                }

                error = passsendccb(periph, ccb, inccb);

                if (ccb_malloced)
                        xpt_free_ccb(ccb);
                else
                        xpt_release_ccb(ccb);

                break;
        }
        case CAMIOQUEUE:
        {
                struct pass_io_req *io_req;
                union ccb **user_ccb, *ccb;
                xpt_opcode fc;

#ifdef COMPAT_FREEBSD32
                if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                        error = ENOTTY;
                        goto bailout;
                }
#endif
                if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) {
                        error = passcreatezone(periph);
                        if (error != 0)
                                goto bailout;
                }

                /*
                 * We're going to do a blocking allocation for this I/O
                 * request, so we have to drop the lock.
                 */
                cam_periph_unlock(periph);

                io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
                ccb = &io_req->ccb;
                user_ccb = (union ccb **)addr;

                /*
                 * Unlike the CAMIOCOMMAND ioctl above, we only have a
                 * pointer to the user's CCB, so we have to copy the whole
                 * thing in to a buffer we have allocated (above) instead
                 * of allowing the ioctl code to malloc a buffer and copy
                 * it in.
                 *
                 * This is an advantage for this asynchronous interface,
                 * since we don't want the memory to get freed while the
                 * CCB is outstanding.
                 */
#if 0
                xpt_print(periph->path, "Copying user CCB %p to "
                    "kernel address %p\n", *user_ccb, ccb);
#endif
                error = copyin(*user_ccb, ccb, sizeof(*ccb));
                if (error != 0) {
                        xpt_print(periph->path, "Copy of user CCB %p to "
                            "kernel address %p failed with error %d\n",
                            *user_ccb, ccb, error);
                        goto camioqueue_error;
                }
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
                if (ccb->ccb_h.func_code == XPT_SCSI_IO)
                        ccb->csio.bio = NULL;
#endif

                if (ccb->ccb_h.flags & CAM_UNLOCKED) {
                        error = EINVAL;
                        goto camioqueue_error;
                }

                if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
                        if (ccb->csio.cdb_len > IOCDBLEN) {
                                error = EINVAL;
                                goto camioqueue_error;
                        }
                        error = copyin(ccb->csio.cdb_io.cdb_ptr,
                            ccb->csio.cdb_io.cdb_bytes, ccb->csio.cdb_len);
                        if (error != 0)
                                goto camioqueue_error;
                        ccb->ccb_h.flags &= ~CAM_CDB_POINTER;
                }

                /*
                 * Some CCB types, like scan bus and scan lun can only go
                 * through the transport layer device.
                 */
                if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) {
                        xpt_print(periph->path, "CCB function code %#x is "
                            "restricted to the XPT device\n",
                            ccb->ccb_h.func_code);
                        error = ENODEV;
                        goto camioqueue_error;
                }

                /*
                 * Save the user's CCB pointer as well as his linked list
                 * pointers and peripheral private area so that we can
                 * restore these later.
                 */
                io_req->user_ccb_ptr = *user_ccb;
                io_req->user_periph_links = ccb->ccb_h.periph_links;
                io_req->user_periph_priv = ccb->ccb_h.periph_priv;

                /*
                 * Now that we've saved the user's values, we can set our
                 * own peripheral private entry.
                 */
                ccb->ccb_h.ccb_ioreq = io_req;

                /* Compatibility for RL/priority-unaware code. */
                priority = ccb->ccb_h.pinfo.priority;
                if (priority <= CAM_PRIORITY_OOB)
                        priority += CAM_PRIORITY_OOB + 1;

                /*
                 * Setup fields in the CCB like the path and the priority.
                 * The path in particular cannot be done in userland, since
                 * it is a pointer to a kernel data structure.
                 */
                xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority,
                    ccb->ccb_h.flags);

                /*
                 * Setup our done routine.  There is no way for the user to
                 * have a valid pointer here.
                 */
                ccb->ccb_h.cbfcnp = passdone;

                fc = ccb->ccb_h.func_code;
                /*
                 * If this function code has memory that can be mapped in
                 * or out, we need to call passmemsetup().
                 */
                if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO)
                 || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH)
                 || (fc == XPT_DEV_ADVINFO)
                 || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
                        error = passmemsetup(periph, io_req);
                        if (error != 0)
                                goto camioqueue_error;
                } else
                        io_req->mapinfo.num_bufs_used = 0;

                cam_periph_lock(periph);

                /*
                 * Everything goes on the incoming queue initially.
                 */
                TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);

                /*
                 * If the CCB is queued, and is not a user CCB, then
                 * we need to allocate a slot for it.  Call xpt_schedule()
                 * so that our start routine will get called when a CCB is
                 * available.
                 */
                if ((fc & XPT_FC_QUEUED)
                 && ((fc & XPT_FC_USER_CCB) == 0)) {
                        xpt_schedule(periph, priority);
                        break;
                }

                /*
                 * At this point, the CCB in question is either an
                 * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB
                 * and therefore should be malloced, not allocated via a slot.
                 * Remove the CCB from the incoming queue and add it to the
                 * active queue.
                 */
                TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
                TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);

                xpt_action(ccb);

                /*
                 * If this is not a queued CCB (i.e. it is an immediate CCB),
                 * then it is already done.  We need to put it on the done
                 * queue for the user to fetch.
                 */
                if ((fc & XPT_FC_QUEUED) == 0) {
                        TAILQ_REMOVE(&softc->active_queue, io_req, links);
                        TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
                }
                break;

camioqueue_error:
                uma_zfree(softc->pass_zone, io_req);
                cam_periph_lock(periph);
                break;
        }
        case CAMIOGET:
        {
                union ccb **user_ccb;
                struct pass_io_req *io_req;
                int old_error;

#ifdef COMPAT_FREEBSD32
                if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
                        error = ENOTTY;
                        goto bailout;
                }
#endif
                user_ccb = (union ccb **)addr;
                old_error = 0;

                io_req = TAILQ_FIRST(&softc->done_queue);
                if (io_req == NULL) {
                        error = ENOENT;
                        break;
                }

                /*
                 * Remove the I/O from the done queue.
                 */
                TAILQ_REMOVE(&softc->done_queue, io_req, links);

                /*
                 * We have to drop the lock during the copyout because the
                 * copyout can result in VM faults that require sleeping.
                 */
                cam_periph_unlock(periph);

                /*
                 * Do any needed copies (e.g. for reads) and revert the
                 * pointers in the CCB back to the user's pointers.
                 */
                error = passmemdone(periph, io_req);

                old_error = error;

                io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
                io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;

#if 0
                xpt_print(periph->path, "Copying to user CCB %p from "
                    "kernel address %p\n", *user_ccb, &io_req->ccb);
#endif

                error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
                if (error != 0) {
                        xpt_print(periph->path, "Copy to user CCB %p from "
                            "kernel address %p failed with error %d\n",
                            *user_ccb, &io_req->ccb, error);
                }

                /*
                 * Prefer the first error we got back, and make sure we
                 * don't overwrite bad status with good.
                 */
                if (old_error != 0)
                        error = old_error;

                cam_periph_lock(periph);

                /*
                 * At this point, if there was an error, we could potentially
                 * re-queue the I/O and try again.  But why?  The error
                 * would almost certainly happen again.  We might as well
                 * not leak memory.
                 */
                uma_zfree(softc->pass_zone, io_req);
                break;
        }
        default:
                error = cam_periph_ioctl(periph, cmd, addr, passerror);
                break;
        }

bailout:
        cam_periph_unlock(periph);

        return (error);
}
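
/*
 * poll(2) support: the device is always writable (new requests can be
 * queued), and becomes readable once a completed request is waiting on
 * the done queue.
 */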
static int
passpoll(struct cdev *dev, int poll_events, struct thread *td)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        int revents;

        periph = (struct cam_periph *)dev->si_drv1;
        softc = (struct pass_softc *)periph->softc;

        revents = poll_events & (POLLOUT | POLLWRNORM);
        if ((poll_events & (POLLIN | POLLRDNORM)) != 0) {
                cam_periph_lock(periph);

                if (!TAILQ_EMPTY(&softc->done_queue)) {
                        revents |= poll_events & (POLLIN | POLLRDNORM);
                }
                cam_periph_unlock(periph);
                if (revents == 0)
                        selrecord(td, &softc->read_select);
        }

        return (revents);
}
static int
passkqfilter(struct cdev *dev, struct knote *kn)
{
        struct cam_periph *periph;
        struct pass_softc *softc;

        periph = (struct cam_periph *)dev->si_drv1;
        softc = (struct pass_softc *)periph->softc;

        kn->kn_hook = (caddr_t)periph;
        kn->kn_fop = &passread_filtops;
        knlist_add(&softc->read_select.si_note, kn, 0);

        return (0);
}
static void
passreadfiltdetach(struct knote *kn)
{
        struct cam_periph *periph;
        struct pass_softc *softc;

        periph = (struct cam_periph *)kn->kn_hook;
        softc = (struct pass_softc *)periph->softc;

        knlist_remove(&softc->read_select.si_note, kn, 0);
}
static int
passreadfilt(struct knote *kn, long hint)
{
        struct cam_periph *periph;
        struct pass_softc *softc;
        int retval;

        periph = (struct cam_periph *)kn->kn_hook;
        softc = (struct pass_softc *)periph->softc;

        cam_periph_assert(periph, MA_OWNED);

        if (TAILQ_EMPTY(&softc->done_queue))
                retval = 0;
        else
                retval = 1;

        return (retval);
}
/*
 * Generally, "ccb" should be the CCB supplied by the kernel.  "inccb"
 * should be the CCB that is copied in from the user.
 */
static int
passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb)
{
        struct pass_softc *softc;
        struct cam_periph_map_info mapinfo;
        uint8_t *cmd;
        xpt_opcode fc;
        int error;

        softc = (struct pass_softc *)periph->softc;

        /*
         * There are some fields in the CCB header that need to be
         * preserved, the rest we get from the user.
         */
        xpt_merge_ccb(ccb, inccb);

        if (ccb->ccb_h.flags & CAM_CDB_POINTER) {
                cmd = __builtin_alloca(ccb->csio.cdb_len);
                error = copyin(ccb->csio.cdb_io.cdb_ptr, cmd, ccb->csio.cdb_len);
                if (error)
                        return (error);
                ccb->csio.cdb_io.cdb_ptr = cmd;
        }

        /*
         * Let cam_periph_mapmem do a sanity check on the data pointer format.
         * Even if no data transfer is needed, it's a cheap check and it
         * simplifies the code.
         */
        fc = ccb->ccb_h.func_code;
        if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO)
         || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO) || (fc == XPT_MMC_IO)
         || (fc == XPT_NVME_ADMIN) || (fc == XPT_NVME_IO)) {
                bzero(&mapinfo, sizeof(mapinfo));

                /*
                 * cam_periph_mapmem calls into proc and vm functions that can
                 * sleep as well as trigger I/O, so we can't hold the lock.
                 * Dropping it here is reasonably safe.
                 */
                cam_periph_unlock(periph);
                error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio);
                cam_periph_lock(periph);

                /*
                 * cam_periph_mapmem returned an error, we can't continue.
                 * Return the error to the user.
                 */
                if (error)
                        return (error);
        } else
                /* Ensure that the unmap call later on is a no-op. */
                mapinfo.num_bufs_used = 0;

        /*
         * If the user wants us to perform any error recovery, then honor
         * that request.  Otherwise, it's up to the user to perform any
         * error recovery.
         */
        {
                uint32_t cam_flags, sense_flags;

                passflags(ccb, &cam_flags, &sense_flags);
                cam_periph_runccb(ccb, passerror, cam_flags,
                    sense_flags, softc->device_stats);
        }

        cam_periph_unlock(periph);
        cam_periph_unmapmem(ccb, &mapinfo);
        cam_periph_lock(periph);

        ccb->ccb_h.cbfcnp = NULL;
        ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv;
        bcopy(ccb, inccb, sizeof(union ccb));

        return (0);
}
/*
 * Set the cam_flags and sense_flags based on whether or not the request wants
 * error recovery.  In order to log errors via devctl, we need to do at least
 * minimal recovery.  We do this by not retrying unit attention (we let the
 * requester do it, or not, if appropriate) and specifically asking for no
 * recovery, like we do during device probing.
 */
static void
passflags(union ccb *ccb, uint32_t *cam_flags, uint32_t *sense_flags)
{
        if ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) != 0) {
                *cam_flags = CAM_RETRY_SELTO;
                *sense_flags = SF_RETRY_UA | SF_NO_PRINT;
        } else {
                *cam_flags = 0;
                *sense_flags = SF_NO_RETRY | SF_NO_RECOVERY | SF_NO_PRINT;
        }
}

static int
passerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags)
{
        return (cam_periph_error(ccb, cam_flags, sense_flags));
}