3 * Copyright (c) 2010-2013 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_compat.h"
29 #include <sys/stdint.h>
30 #include <sys/stddef.h>
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/linker_set.h>
38 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/unistd.h>
44 #include <sys/malloc.h>
49 #include <sys/queue.h>
50 #include <sys/fcntl.h>
52 #include <sys/vnode.h>
53 #include <sys/selinfo.h>
54 #include <sys/ptrace.h>
56 #include <machine/bus.h>
61 #include <fs/cuse/cuse_defs.h>
62 #include <fs/cuse/cuse_ioctl.h>
64 MODULE_VERSION(cuse, 1);
67 * Prevent cuse4bsd.ko and cuse.ko from loading at the same time by
68 * declaring support for the cuse4bsd interface in cuse.ko:
70 MODULE_VERSION(cuse4bsd, 1);
72 #define NBUSY ((uint8_t *)1)
75 FEATURE(cuse, "Userspace character devices");
/*
 * One outstanding command slot shared between a client and its server.
 * NOTE(review): lines are missing from this extraction; several members
 * (lock, condvar, error/command fields referenced later) are not visible.
 */
82 struct cuse_client_command {
83 TAILQ_ENTRY(cuse_client_command) entry;
84 struct cuse_command sub;
/* Server thread currently servicing this command, if any. */
87 struct thread *entered;
88 struct cuse_client *client;
/* Peer process for cross-process data copies (see proc_rwmem users). */
89 struct proc *proc_curr;
/* NOTE(review): the members below appear to belong to struct cuse_memory
 * (owner/is_allocated are used on cuse_mem[] entries) — struct header not
 * visible here. */
97 struct cuse_server *owner;
100 uint32_t is_allocated;
/* Kernel bookkeeping for one character device created by a server. */
103 struct cuse_server_dev {
104 TAILQ_ENTRY(cuse_server_dev) entry;
105 struct cuse_server *server;
106 struct cdev *kern_dev;
107 struct cuse_dev *user_dev;
/* Per-server state: queued commands, created devices and clients.
 * NOTE(review): "struct cuse_server {" header line is missing here. */
111 TAILQ_ENTRY(cuse_server) entry;
112 TAILQ_HEAD(, cuse_client_command) head;
113 TAILQ_HEAD(, cuse_server_dev) hdev;
114 TAILQ_HEAD(, cuse_client) hcli;
116 struct selinfo selinfo;
/* Per-client (per-open of a CUSE character device) state.
 * NOTE(review): "struct cuse_client {" header line is missing here. */
123 TAILQ_ENTRY(cuse_client) entry;
124 TAILQ_ENTRY(cuse_client) entry_ref;
125 struct cuse_client_command cmds[CUSE_CMD_MAX];
126 struct cuse_server *server;
127 struct cuse_server_dev *server_dev;
/* Bounce buffer for ioctl argument data, see cuse_server_ioctl_copy_locked(). */
129 uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);
131 int fflags; /* file flags */
132 int cflags; /* client flags */
/* Client flag bits stored in "cflags": */
133 #define CUSE_CLI_IS_CLOSING 0x01
134 #define CUSE_CLI_KNOTE_NEED_READ 0x02
135 #define CUSE_CLI_KNOTE_NEED_WRITE 0x04
136 #define CUSE_CLI_KNOTE_HAS_READ 0x08
137 #define CUSE_CLI_KNOTE_HAS_WRITE 0x10
/* True once the client has started closing. */
140 #define CUSE_CLIENT_CLOSING(pcc) \
141 ((pcc)->cflags & CUSE_CLI_IS_CLOSING)
/* Malloc type for all CUSE allocations. */
143 static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
/* Global server list and the mutex protecting all CUSE state. */
145 static TAILQ_HEAD(, cuse_server) cuse_server_head;
146 static struct mtx cuse_mtx;
/* The /dev/cuse control device node. */
147 static struct cdev *cuse_dev;
/* Unit-number allocation tables and the shared memory pool. */
148 static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
149 static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
150 static struct cuse_memory cuse_mem[CUSE_ALLOC_UNIT_MAX];
/* Forward declarations. */
152 static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
153 static void cuse_client_kqfilter_read_detach(struct knote *kn);
154 static void cuse_client_kqfilter_write_detach(struct knote *kn);
155 static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
156 static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
/* kqueue filter operations for client read and write event notification. */
158 static struct filterops cuse_client_kqfilter_read_ops = {
160 .f_detach = cuse_client_kqfilter_read_detach,
161 .f_event = cuse_client_kqfilter_read_event,
164 static struct filterops cuse_client_kqfilter_write_ops = {
166 .f_detach = cuse_client_kqfilter_write_detach,
167 .f_event = cuse_client_kqfilter_write_event,
/* Entry points for the userland-backed client character devices. */
170 static d_open_t cuse_client_open;
171 static d_close_t cuse_client_close;
172 static d_ioctl_t cuse_client_ioctl;
173 static d_read_t cuse_client_read;
174 static d_write_t cuse_client_write;
175 static d_poll_t cuse_client_poll;
176 static d_mmap_t cuse_client_mmap;
177 static d_kqfilter_t cuse_client_kqfilter;
/* cdevsw for devices created via CUSE_IOCTL_CREATE_DEV.
 * D_TRACKCLOSE: every close() reaches cuse_client_close(). */
179 static struct cdevsw cuse_client_devsw = {
180 .d_version = D_VERSION,
181 .d_open = cuse_client_open,
182 .d_close = cuse_client_close,
183 .d_ioctl = cuse_client_ioctl,
184 .d_name = "cuse_client",
185 .d_flags = D_TRACKCLOSE,
186 .d_read = cuse_client_read,
187 .d_write = cuse_client_write,
188 .d_poll = cuse_client_poll,
189 .d_mmap = cuse_client_mmap,
190 .d_kqfilter = cuse_client_kqfilter,
/* Entry points for the /dev/cuse control device (the server side). */
193 static d_open_t cuse_server_open;
194 static d_close_t cuse_server_close;
195 static d_ioctl_t cuse_server_ioctl;
196 static d_read_t cuse_server_read;
197 static d_write_t cuse_server_write;
198 static d_poll_t cuse_server_poll;
199 static d_mmap_t cuse_server_mmap;
/* cdevsw for /dev/cuse; no kqfilter on the server node. */
201 static struct cdevsw cuse_server_devsw = {
202 .d_version = D_VERSION,
203 .d_open = cuse_server_open,
204 .d_close = cuse_server_close,
205 .d_ioctl = cuse_server_ioctl,
206 .d_name = "cuse_server",
207 .d_flags = D_TRACKCLOSE,
208 .d_read = cuse_server_read,
209 .d_write = cuse_server_write,
210 .d_poll = cuse_server_poll,
211 .d_mmap = cuse_server_mmap,
214 static void cuse_client_is_closing(struct cuse_client *);
215 static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
/* NOTE(review): the line below is the tail of a global lock/unlock helper
 * whose surrounding definition is missing from this extraction. */
226 mtx_unlock(&cuse_mtx);
/* Serialize use of one command slot via its per-command sx lock. */
230 cuse_cmd_lock(struct cuse_client_command *pccmd)
232 sx_xlock(&pccmd->sx);
/* Release the per-command sx lock taken by cuse_cmd_lock(). */
236 cuse_cmd_unlock(struct cuse_client_command *pccmd)
238 sx_xunlock(&pccmd->sx);
/*
 * Module load handler: initialize the global server list and mutex and
 * create the /dev/cuse control device (root:operator, mode 0600).
 */
242 cuse_kern_init(void *arg)
244 TAILQ_INIT(&cuse_server_head);
246 mtx_init(&cuse_mtx, "cuse-mtx", NULL, MTX_DEF);
248 cuse_dev = make_dev(&cuse_server_devsw, 0,
249 UID_ROOT, GID_OPERATOR, 0600, "cuse");
/* Announce the version encoded as three bytes in CUSE_VERSION. */
251 printf("Cuse v%d.%d.%d @ /dev/cuse\n",
252 (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
253 (CUSE_VERSION >> 0) & 0xFF);
/* Run once DEVFS is available. */
256 SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, 0);
/*
 * Module unload handler: nag userland and wait (2s per iteration) until
 * all servers are gone, then destroy the control device and global mutex.
 */
259 cuse_kern_uninit(void *arg)
265 printf("Cuse: Please exit all /dev/cuse instances "
266 "and processes which have used this device.\n");
268 pause("DRAIN", 2 * hz);
/* Loop condition around this check is not visible in this extraction. */
271 ptr = TAILQ_FIRST(&cuse_server_head);
278 if (cuse_dev != NULL)
279 destroy_dev(cuse_dev);
281 mtx_destroy(&cuse_mtx);
284 SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
/*
 * Fetch this file descriptor's server pointer from cdevpriv; fail when
 * the private data is missing or the server is already closing.
 */
287 cuse_server_get(struct cuse_server **ppcs)
289 struct cuse_server *pcs;
292 error = devfs_get_cdevpriv((void **)&pcs);
297 /* check if closing */
299 if (pcs->is_closing) {
/* Mark the server closing and propagate closing state to all clients. */
310 cuse_server_is_closing(struct cuse_server *pcs)
312 struct cuse_client *pcc;
319 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
320 cuse_client_is_closing(pcc);
/*
 * Find the command slot currently being serviced ("entered") by thread
 * td across this server's clients; closing clients are skipped.
 * Returns NULL when no such slot exists (return path not visible here).
 */
324 static struct cuse_client_command *
325 cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
327 struct cuse_client *pcc;
333 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
334 if (CUSE_CLIENT_CLOSING(pcc))
336 for (n = 0; n != CUSE_CMD_MAX; n++) {
337 if (pcc->cmds[n].entered == td)
338 return (&pcc->cmds[n]);
/*
 * Sanitize a device name in place, accepting [a-z], [A-Z], [0-9] and
 * '.', '_', '/'.  NOTE(review): the branch handling disallowed
 * characters is not visible in this extraction — presumably they are
 * replaced or the string is truncated; verify against full source.
 */
346 cuse_str_filter(char *ptr)
350 while (((c = *ptr) != 0)) {
352 if ((c >= 'a') && (c <= 'z')) {
356 if ((c >= 'A') && (c <= 'Z')) {
360 if ((c >= '0') && (c <= '9')) {
364 if ((c == '.') || (c == '_') || (c == '/')) {
/*
 * Map a CUSE_ERR_* code from userland to a kernel errno value.
 * Only the WOULDBLOCK case's mapping is visible in this extraction.
 */
375 cuse_convert_error(int error)
383 case CUSE_ERR_WOULDBLOCK:
384 return (EWOULDBLOCK);
385 case CUSE_ERR_INVALID:
387 case CUSE_ERR_NO_MEMORY:
391 case CUSE_ERR_SIGNAL:
/*
 * Release this server's claim on all shared-memory units.  The backing
 * memory itself is intentionally kept (see comment below) so later
 * servers can reuse it via cuse_server_alloc_memory().
 */
399 cuse_server_free_memory(struct cuse_server *pcs)
401 struct cuse_memory *mem;
404 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
407 /* this memory is never freed */
408 if (mem->owner == pcs) {
410 mem->is_allocated = 0;
/*
 * Claim (or allocate) one shared-memory unit of "page_count" pages for
 * server "pcs".  virtaddr == NBUSY marks an allocation in progress by
 * another thread; an existing unallocated unit of matching size is
 * reused instead of allocating fresh memory.
 */
416 cuse_server_alloc_memory(struct cuse_server *pcs,
417 struct cuse_memory *mem, uint32_t page_count)
424 if (mem->virtaddr == NBUSY) {
428 if (mem->virtaddr != NULL) {
429 if (mem->is_allocated != 0) {
/* Reuse only when the size matches exactly. */
433 if (mem->page_count == page_count) {
434 mem->is_allocated = 1;
442 memset(mem, 0, sizeof(*mem));
/* Mark busy while the (sleeping) allocation below runs unlocked. */
444 mem->virtaddr = NBUSY;
448 ptr = malloc(page_count * PAGE_SIZE, M_CUSE, M_WAITOK | M_ZERO);
/* NOTE(review): this NULL store appears to be a failure/cleanup path;
 * surrounding context not visible. */
457 mem->virtaddr = NULL;
462 mem->page_count = page_count;
463 mem->is_allocated = 1;
/*
 * Fetch this file descriptor's client pointer from cdevpriv; fail when
 * either the client or its server is closing.
 */
471 cuse_client_get(struct cuse_client **ppcc)
473 struct cuse_client *pcc;
476 /* try to get private data */
477 error = devfs_get_cdevpriv((void **)&pcc);
482 /* check if closing */
484 if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
/*
 * Mark a client closing (idempotent), detach it from its device, remove
 * any of its commands still queued to the server, and wake up waiters.
 */
495 cuse_client_is_closing(struct cuse_client *pcc)
497 struct cuse_client_command *pccmd;
500 if (CUSE_CLIENT_CLOSING(pcc))
503 pcc->cflags |= CUSE_CLI_IS_CLOSING;
504 pcc->server_dev = NULL;
506 for (n = 0; n != CUSE_CMD_MAX; n++) {
508 pccmd = &pcc->cmds[n];
/* tqe_prev != NULL means the command is still on the server queue. */
510 if (pccmd->entry.tqe_prev != NULL) {
511 TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
512 pccmd->entry.tqe_prev = NULL;
514 cv_broadcast(&pccmd->cv);
/*
 * Fill in a command slot (file flags, data pointer, argument) and queue
 * it to the server, unless it is already queued or either side is
 * closing.  Caller holds the command lock.
 */
519 cuse_client_send_command_locked(struct cuse_client_command *pccmd,
520 uintptr_t data_ptr, unsigned long arg, int fflags, int ioflag)
522 unsigned long cuse_fflags = 0;
523 struct cuse_server *pcs;
/* Translate kernel file/io flags into CUSE_FFLAG_* bits. */
526 cuse_fflags |= CUSE_FFLAG_READ;
529 cuse_fflags |= CUSE_FFLAG_WRITE;
531 if (ioflag & IO_NDELAY)
532 cuse_fflags |= CUSE_FFLAG_NONBLOCK;
534 pccmd->sub.fflags = cuse_fflags;
535 pccmd->sub.data_pointer = data_ptr;
536 pccmd->sub.argument = arg;
538 pcs = pccmd->client->server;
/* Only enqueue when not already queued and nothing is closing. */
540 if ((pccmd->entry.tqe_prev == NULL) &&
541 (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
542 (pcs->is_closing == 0)) {
543 TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/*
 * Record that a signal interrupted a command and queue the dedicated
 * CUSE_CMD_SIGNAL slot to the server so userland can react.
 */
549 cuse_client_got_signal(struct cuse_client_command *pccmd)
551 struct cuse_server *pcs;
553 pccmd->got_signal = 1;
/* Switch to the client's signal command slot. */
555 pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];
557 pcs = pccmd->client->server;
/* Same enqueue guard as cuse_client_send_command_locked(). */
559 if ((pccmd->entry.tqe_prev == NULL) &&
560 (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
561 (pcs->is_closing == 0)) {
562 TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/*
 * Wait for the server to complete a previously queued command and
 * return its result.  Registers curproc so the server can copy data
 * to/from this process, handles signals via cuse_client_got_signal(),
 * and finally waits for all per-process references to drain.
 */
568 cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
569 uint8_t *arg_ptr, uint32_t arg_len)
575 pccmd->proc_curr = curthread->td_proc;
577 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
578 pccmd->client->server->is_closing) {
579 error = CUSE_ERR_OTHER;
/* Wait until the server posts a completion (command != NONE). */
582 while (pccmd->command == CUSE_CMD_NONE) {
584 cv_wait(&pccmd->cv, &cuse_mtx);
586 error = cv_wait_sig(&pccmd->cv, &cuse_mtx);
589 cuse_client_got_signal(pccmd);
591 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
592 pccmd->client->server->is_closing) {
593 error = CUSE_ERR_OTHER;
598 error = pccmd->error;
599 pccmd->command = CUSE_CMD_NONE;
600 cv_signal(&pccmd->cv);
604 /* wait until all process references are gone */
606 pccmd->proc_curr = NULL;
608 while (pccmd->proc_refs != 0)
609 cv_wait(&pccmd->cv, &cuse_mtx);
614 /*------------------------------------------------------------------------*
616  *------------------------------------------------------------------------*/
/*
 * Tear down one server-created character device: detach it from devfs
 * clients (clear si_drv1), close all clients bound to it, and destroy
 * the cdev synchronously.
 */
619 cuse_server_free_dev(struct cuse_server_dev *pcsd)
621 struct cuse_server *pcs;
622 struct cuse_client *pcc;
624 /* get server pointer */
627 /* prevent creation of more devices */
629 if (pcsd->kern_dev != NULL)
630 pcsd->kern_dev->si_drv1 = NULL;
632 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
633 if (pcc->server_dev == pcsd)
634 cuse_client_is_closing(pcc);
638 /* destroy device, if any */
639 if (pcsd->kern_dev != NULL) {
640 /* destroy device synchronously */
641 destroy_dev(pcsd->kern_dev);
/*
 * Drop a reference on a server; the last reference tears everything
 * down: clients, devices, unit numbers, shared memory, knotes, select
 * state and the condvar.  Used as the cdevpriv destructor.
 */
647 cuse_server_free(void *arg)
649 struct cuse_server *pcs = arg;
650 struct cuse_server_dev *pcsd;
/* Bail out while other references remain. */
654 if (pcs->refs != 0) {
658 cuse_server_is_closing(pcs);
659 /* final client wakeup, if any */
660 cuse_server_wakeup_all_client_locked(pcs);
662 TAILQ_REMOVE(&cuse_server_head, pcs, entry);
/* id == -1 frees every unit owned by this server. */
664 cuse_free_unit_by_id_locked(pcs, -1);
666 while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
667 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
669 cuse_server_free_dev(pcsd);
673 cuse_server_free_memory(pcs);
675 knlist_clear(&pcs->selinfo.si_note, 1);
676 knlist_destroy(&pcs->selinfo.si_note);
680 seldrain(&pcs->selinfo);
682 cv_destroy(&pcs->cv);
/*
 * Open handler for /dev/cuse: allocate a new server, attach it as
 * cdevpriv (freed via cuse_server_free), record the creator's PID so a
 * process cannot be both server and client, and link it globally.
 */
688 cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
690 struct cuse_server *pcs;
692 pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO);
696 if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
697 printf("Cuse: Cannot set cdevpriv.\n");
702 /* store current process ID */
703 pcs->pid = curproc->p_pid;
705 TAILQ_INIT(&pcs->head);
706 TAILQ_INIT(&pcs->hdev);
707 TAILQ_INIT(&pcs->hcli);
709 cv_init(&pcs->cv, "cuse-server-cv");
/* knote list shares the global CUSE mutex. */
711 knlist_init_mtx(&pcs->selinfo.si_note, &cuse_mtx);
715 TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
/*
 * Close handler for /dev/cuse: mark the server closing, wake all
 * clients and clear pending knotes.  Final teardown happens in
 * cuse_server_free() when the last reference drops.
 */
722 cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
724 struct cuse_server *pcs;
727 error = cuse_server_get(&pcs);
732 cuse_server_is_closing(pcs);
733 /* final client wakeup, if any */
734 cuse_server_wakeup_all_client_locked(pcs);
736 knlist_clear(&pcs->selinfo.si_note, 1);
/* read(2)/write(2) on the server node — bodies not visible in this
 * extraction; presumably they just return an error. TODO confirm. */
744 cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
750 cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
/*
 * Copy ioctl argument data between the server process and the client's
 * bounce buffer.  peer_ptr encodes an offset into ioctl_buffer relative
 * to CUSE_BUF_MIN_PTR; offset and length are bounds-checked against
 * CUSE_BUFFER_MAX before the copy.
 */
756 cuse_server_ioctl_copy_locked(struct cuse_client_command *pccmd,
757 struct cuse_data_chunk *pchk, int isread)
763 offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;
765 if (pchk->length > CUSE_BUFFER_MAX)
768 if (offset >= CUSE_BUFFER_MAX)
771 if ((offset + pchk->length) > CUSE_BUFFER_MAX)
774 p_proc = pccmd->proc_curr;
/* Sanity: reference count must never be negative. */
778 if (pccmd->proc_refs < 0)
/* Copy direction depends on "isread" (server reading vs. writing). */
787 (void *)pchk->local_ptr,
788 pccmd->client->ioctl_buffer + offset,
792 pccmd->client->ioctl_buffer + offset,
793 (void *)pchk->local_ptr,
/* Wake the client when the peer process reference went away meanwhile. */
801 if (pccmd->proc_curr == NULL)
802 cv_signal(&pccmd->cv);
/*
 * Copy "len" bytes between two processes' address spaces using
 * proc_rwmem().  The current process must be one of the two endpoints;
 * the uio targets the foreign process and the iov the local one.
 */
808 cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
809 struct proc *proc_d, vm_offset_t data_d, size_t len)
812 struct proc *proc_cur;
816 proc_cur = td->td_proc;
/* Case 1: we are the destination — read from the source process. */
818 if (proc_cur == proc_d) {
820 .iov_base = (caddr_t)data_d,
826 .uio_offset = (off_t)data_s,
828 .uio_segflg = UIO_USERSPACE,
834 error = proc_rwmem(proc_s, &uio);
/* Case 2: we are the source — write into the destination process. */
837 } else if (proc_cur == proc_s) {
839 .iov_base = (caddr_t)data_s,
845 .uio_offset = (off_t)data_d,
847 .uio_segflg = UIO_USERSPACE,
853 error = proc_rwmem(proc_d, &uio);
/*
 * Copy a data chunk directly between the server process (curthread) and
 * the client process registered in proc_curr, in the direction selected
 * by "isread", using cuse_proc2proc_copy().
 */
862 cuse_server_data_copy_locked(struct cuse_client_command *pccmd,
863 struct cuse_data_chunk *pchk, int isread)
868 p_proc = pccmd->proc_curr;
/* Sanity: reference count must never be negative. */
872 if (pccmd->proc_refs < 0)
880 error = cuse_proc2proc_copy(
881 curthread->td_proc, pchk->local_ptr,
882 p_proc, pchk->peer_ptr,
885 error = cuse_proc2proc_copy(
886 p_proc, pchk->peer_ptr,
887 curthread->td_proc, pchk->local_ptr,
/* Wake the client when the peer process reference went away meanwhile. */
895 if (pccmd->proc_curr == NULL)
896 cv_signal(&pccmd->cv);
/*
 * Allocate a device unit number for "id": scan existing entries with
 * the same CUSE_ID_MASK class to find a free sub-unit "x", then claim
 * the first empty table slot.  Caller holds cuse_mtx.
 */
902 cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
909 for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
910 if (cuse_alloc_unit[n] != NULL) {
/* Skip entries belonging to a different ID class. */
911 if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
913 if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
922 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
923 if (cuse_alloc_unit[n] == NULL) {
924 cuse_alloc_unit[n] = pcs;
925 cuse_alloc_unit_id[n] = id | x;
/* Wake select(2)/poll(2) sleepers and kqueue listeners on this server. */
934 cuse_server_wakeup_locked(struct cuse_server *pcs)
936 selwakeup(&pcs->selinfo);
937 KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
/*
 * Flag every client as needing both read and write knote delivery
 * (direction of the event is unknown) and wake the server's waiters.
 */
941 cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
943 struct cuse_client *pcc;
945 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
946 pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
947 CUSE_CLI_KNOTE_NEED_WRITE);
949 cuse_server_wakeup_locked(pcs);
/*
 * Free the unit number matching "id" owned by this server, or all of
 * the server's units when id == -1.  Returns EINVAL when nothing
 * matched.  Caller holds cuse_mtx.
 */
953 cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
958 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
959 if (cuse_alloc_unit[n] == pcs) {
960 if (cuse_alloc_unit_id[n] == id || id == -1) {
961 cuse_alloc_unit[n] = NULL;
962 cuse_alloc_unit_id[n] = 0;
968 return (found ? 0 : EINVAL);
/*
 * Control-device ioctl dispatcher — the main server API.  Userland
 * servers use these requests to fetch client commands, post results,
 * manage unit numbers and shared memory, create/destroy character
 * devices and move data to/from client processes.
 */
972 cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
973 caddr_t data, int fflag, struct thread *td)
975 struct cuse_server *pcs;
978 error = cuse_server_get(&pcs);
983 struct cuse_client_command *pccmd;
984 struct cuse_client *pcc;
985 struct cuse_command *pcmd;
986 struct cuse_alloc_info *pai;
987 struct cuse_create_dev *pcd;
988 struct cuse_server_dev *pcsd;
989 struct cuse_data_chunk *pchk;
/* Block until a client command is queued, then hand it to userland. */
992 case CUSE_IOCTL_GET_COMMAND:
997 while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
998 error = cv_wait_sig(&pcs->cv, &cuse_mtx);
1000 if (pcs->is_closing)
1009 TAILQ_REMOVE(&pcs->head, pccmd, entry);
1010 pccmd->entry.tqe_prev = NULL;
/* Record which server thread is servicing this command. */
1012 pccmd->entered = curthread;
/* Post the command result back and wake the waiting client. */
1020 case CUSE_IOCTL_SYNC_COMMAND:
1023 while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {
1025 /* send sync command */
1026 pccmd->entered = NULL;
1027 pccmd->error = *(int *)data;
1028 pccmd->command = CUSE_CMD_SYNC;
1030 /* signal peer, if any */
1031 cv_signal(&pccmd->cv);
/* Allocate the next free default unit number. */
1037 case CUSE_IOCTL_ALLOC_UNIT:
1040 n = cuse_alloc_unit_by_id_locked(pcs,
1041 CUSE_ID_DEFAULT(0));
/* Allocate a unit number within a caller-supplied ID class. */
1050 case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
1054 n = (n & CUSE_ID_MASK);
1057 n = cuse_alloc_unit_by_id_locked(pcs, n);
1066 case CUSE_IOCTL_FREE_UNIT:
1070 n = CUSE_ID_DEFAULT(n);
1073 error = cuse_free_unit_by_id_locked(pcs, n);
1077 case CUSE_IOCTL_FREE_UNIT_BY_ID:
1082 error = cuse_free_unit_by_id_locked(pcs, n);
/* Allocate a bounds-checked shared memory unit for mmap(). */
1086 case CUSE_IOCTL_ALLOC_MEMORY:
1090 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1094 if (pai->page_count > CUSE_ALLOC_PAGES_MAX) {
1098 error = cuse_server_alloc_memory(pcs,
1099 &cuse_mem[pai->alloc_nr], pai->page_count);
1102 case CUSE_IOCTL_FREE_MEMORY:
1105 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1109 /* we trust the character device driver in this case */
1112 if (cuse_mem[pai->alloc_nr].owner == pcs) {
1113 cuse_mem[pai->alloc_nr].is_allocated = 0;
1114 cuse_mem[pai->alloc_nr].owner = NULL;
/* Report and clear the got_signal flag of the current command. */
1121 case CUSE_IOCTL_GET_SIG:
1124 pccmd = cuse_server_find_command(pcs, curthread);
1126 if (pccmd != NULL) {
1127 n = pccmd->got_signal;
1128 pccmd->got_signal = 0;
/* Set the per-file handle passed back with every future command. */
1138 case CUSE_IOCTL_SET_PFH:
1141 pccmd = cuse_server_find_command(pcs, curthread);
1143 if (pccmd != NULL) {
1144 pcc = pccmd->client;
1145 for (n = 0; n != CUSE_CMD_MAX; n++) {
1146 pcc->cmds[n].sub.per_file_handle = *(uintptr_t *)data;
/* Create a character device on behalf of the server (root only). */
1154 case CUSE_IOCTL_CREATE_DEV:
1156 error = priv_check(curthread, PRIV_DRIVER);
/* Force NUL termination and sanitize the requested name. */
1164 pcd->devname[sizeof(pcd->devname) - 1] = 0;
1166 if (pcd->devname[0] == 0) {
1170 cuse_str_filter(pcd->devname);
1172 pcd->permissions &= 0777;
1174 /* try to allocate a character device */
1176 pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);
1184 pcsd->user_dev = pcd->dev;
1186 pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
1187 &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
1188 pcd->permissions, "%s", pcd->devname);
1190 if (pcsd->kern_dev == NULL) {
1195 pcsd->kern_dev->si_drv1 = pcsd;
1198 TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
/* Destroy a previously created device (root only). */
1203 case CUSE_IOCTL_DESTROY_DEV:
1205 error = priv_check(curthread, PRIV_DRIVER);
1213 pcsd = TAILQ_FIRST(&pcs->hdev);
1214 while (pcsd != NULL) {
1215 if (pcsd->user_dev == *(struct cuse_dev **)data) {
1216 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
1218 cuse_server_free_dev(pcsd);
/* Restart the scan — the list may have changed while unlocked. */
1221 pcsd = TAILQ_FIRST(&pcs->hdev);
1223 pcsd = TAILQ_NEXT(pcsd, entry);
/* Move payload data between server and client address spaces. */
1230 case CUSE_IOCTL_WRITE_DATA:
1231 case CUSE_IOCTL_READ_DATA:
1234 pchk = (struct cuse_data_chunk *)data;
1236 pccmd = cuse_server_find_command(pcs, curthread);
1238 if (pccmd == NULL) {
1239 error = ENXIO; /* invalid request */
1240 } else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
1241 error = EFAULT; /* NULL pointer */
1242 } else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
/* peer_ptr in [MIN,MAX) addresses the client's ioctl bounce buffer. */
1243 error = cuse_server_ioctl_copy_locked(pccmd,
1244 pchk, cmd == CUSE_IOCTL_READ_DATA);
1246 error = cuse_server_data_copy_locked(pccmd,
1247 pchk, cmd == CUSE_IOCTL_READ_DATA);
1252 case CUSE_IOCTL_SELWAKEUP:
1255 * We don't know which direction caused the event.
1258 cuse_server_wakeup_all_client_locked(pcs);
/* The server node is always "ready": report every requested event. */
1270 cuse_server_poll(struct cdev *dev, int events, struct thread *td)
1272 return (events & (POLLHUP | POLLPRI | POLLIN |
1273 POLLRDNORM | POLLOUT | POLLWRNORM));
/*
 * mmap handler for the server node: translate a byte offset into a
 * physical address inside a shared memory unit.  The unit must exist,
 * be owned by this server, be fully allocated (not NBUSY) and the page
 * must lie within its page_count.
 */
1277 cuse_server_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
1279 uint32_t page_nr = offset / PAGE_SIZE;
1280 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1281 struct cuse_memory *mem;
1282 struct cuse_server *pcs;
1286 if (alloc_nr >= CUSE_ALLOC_UNIT_MAX)
1289 error = cuse_server_get(&pcs);
1294 mem = &cuse_mem[alloc_nr];
1296 /* try to enforce slight ownership */
1297 if ((pcs != NULL) && (mem->owner != pcs)) {
1301 if (mem->virtaddr == NULL) {
/* Allocation still in progress — refuse mapping. */
1305 if (mem->virtaddr == NBUSY) {
1309 page_nr %= CUSE_ALLOC_PAGES_MAX;
1311 if (page_nr >= mem->page_count) {
1315 ptr = mem->virtaddr + (page_nr * PAGE_SIZE);
1318 *paddr = vtophys(ptr);
1323 /*------------------------------------------------------------------------*
1325  *------------------------------------------------------------------------*/
/*
 * cdevpriv destructor for a client: mark it closing, unlink it from its
 * server, destroy per-command locks/condvars, free the client and drop
 * the reference it held on the server.
 */
1327 cuse_client_free(void *arg)
1329 struct cuse_client *pcc = arg;
1330 struct cuse_client_command *pccmd;
1331 struct cuse_server *pcs;
1335 cuse_client_is_closing(pcc);
1336 TAILQ_REMOVE(&pcc->server->hcli, pcc, entry);
1339 for (n = 0; n != CUSE_CMD_MAX; n++) {
1341 pccmd = &pcc->cmds[n];
1343 sx_destroy(&pccmd->sx);
1344 cv_destroy(&pccmd->cv);
1351 /* drop reference on server */
1352 cuse_server_free(pcs);
/*
 * Open handler for a server-created device: allocate a client, attach
 * it as cdevpriv, initialize its command slots and forward the open to
 * the userland server via CUSE_CMD_OPEN.
 */
1356 cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1358 struct cuse_client_command *pccmd;
1359 struct cuse_server_dev *pcsd;
1360 struct cuse_client *pcc;
1361 struct cuse_server *pcs;
1362 struct cuse_dev *pcd;
/* si_drv1 links the cdev back to its cuse_server_dev. */
1367 pcsd = dev->si_drv1;
1370 pcd = pcsd->user_dev;
1372 * Check that the refcount didn't wrap and that the
1373 * same process is not both client and server. This
1374 * can easily lead to deadlocks when destroying the
1375 * CUSE character device nodes:
1378 if (pcs->refs < 0 || pcs->pid == curproc->p_pid) {
1379 /* overflow or wrong PID */
1392 pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
1394 /* drop reference on server */
1395 cuse_server_free(pcs);
1398 if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1399 printf("Cuse: Cannot set cdevpriv.\n");
1400 /* drop reference on server */
1401 cuse_server_free(pcs);
1405 pcc->fflags = fflags;
1406 pcc->server_dev = pcsd;
/* Initialize one command slot per CUSE command type. */
1409 for (n = 0; n != CUSE_CMD_MAX; n++) {
1411 pccmd = &pcc->cmds[n];
1413 pccmd->sub.dev = pcd;
1414 pccmd->sub.command = n;
1415 pccmd->client = pcc;
1417 sx_init(&pccmd->sx, "cuse-client-sx");
1418 cv_init(&pccmd->cv, "cuse-client-cv");
1423 /* cuse_client_free() assumes that the client is listed somewhere! */
1424 /* always enqueue */
1426 TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1428 /* check if server is closing */
1429 if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1437 devfs_clear_cdevpriv(); /* XXX bugfix */
/* Forward the open to the userland server and wait for its verdict. */
1440 pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1442 cuse_cmd_lock(pccmd);
1445 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1447 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1451 error = cuse_convert_error(error);
1456 cuse_cmd_unlock(pccmd);
/* On failure, tear the client down again. */
1459 devfs_clear_cdevpriv(); /* XXX bugfix */
/*
 * Close handler: forward CUSE_CMD_CLOSE to the userland server (result
 * is not propagated to the caller), then mark the client closing.
 */
1465 cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
1467 struct cuse_client_command *pccmd;
1468 struct cuse_client *pcc;
1471 error = cuse_client_get(&pcc);
1475 pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
1477 cuse_cmd_lock(pccmd);
1480 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1482 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1485 cuse_cmd_unlock(pccmd);
1488 cuse_client_is_closing(pcc);
/*
 * Refresh kqueue readiness state for a client: when read/write knotes
 * are attached, re-poll the userland server and re-arm the NEED_READ /
 * NEED_WRITE flags, then wake the knote listeners.
 */
1495 cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
1500 temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
1501 CUSE_CLI_KNOTE_HAS_WRITE));
1502 pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
1503 CUSE_CLI_KNOTE_NEED_WRITE);
1507 /* get the latest polling state from the server */
1508 temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);
1511 if (temp & (POLLIN | POLLOUT)) {
1513 pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
1515 pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;
1517 /* make sure the "knote" gets woken up */
1518 cuse_server_wakeup_locked(pcc->server);
/*
 * read(2) handler: forward each iov to the userland server via
 * CUSE_CMD_READ.  The server returns the byte count (or a CUSE error);
 * uiomove(NULL, ...) with UIO_NOCOPY only advances the uio, since the
 * server already copied the data into the client process directly.
 */
1525 cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
1527 struct cuse_client_command *pccmd;
1528 struct cuse_client *pcc;
1532 error = cuse_client_get(&pcc);
1536 pccmd = &pcc->cmds[CUSE_CMD_READ];
/* Only plain userspace reads are supported. */
1538 if (uio->uio_segflg != UIO_USERSPACE) {
1541 uio->uio_segflg = UIO_NOCOPY;
1543 cuse_cmd_lock(pccmd);
1545 while (uio->uio_resid != 0) {
1547 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1552 len = uio->uio_iov->iov_len;
1555 cuse_client_send_command_locked(pccmd,
1556 (uintptr_t)uio->uio_iov->iov_base,
1557 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1559 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1563 error = cuse_convert_error(error);
/* Full segment transferred: advance and continue with the next iov. */
1565 } else if (error == len) {
1566 error = uiomove(NULL, error, uio);
/* Short transfer: advance by the actual count and stop. */
1570 error = uiomove(NULL, error, uio);
1574 cuse_cmd_unlock(pccmd);
1576 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
/* EWOULDBLOCK means "would block" — refresh kqueue state for later. */
1578 if (error == EWOULDBLOCK)
1579 cuse_client_kqfilter_poll(dev, pcc);
/*
 * write(2) handler — mirror image of cuse_client_read(): forward each
 * iov to the userland server via CUSE_CMD_WRITE and advance the uio
 * (UIO_NOCOPY) by the count the server reports.
 */
1585 cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
1587 struct cuse_client_command *pccmd;
1588 struct cuse_client *pcc;
1592 error = cuse_client_get(&pcc);
1596 pccmd = &pcc->cmds[CUSE_CMD_WRITE];
/* Only plain userspace writes are supported. */
1598 if (uio->uio_segflg != UIO_USERSPACE) {
1601 uio->uio_segflg = UIO_NOCOPY;
1603 cuse_cmd_lock(pccmd);
1605 while (uio->uio_resid != 0) {
1607 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1612 len = uio->uio_iov->iov_len;
1615 cuse_client_send_command_locked(pccmd,
1616 (uintptr_t)uio->uio_iov->iov_base,
1617 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1619 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1623 error = cuse_convert_error(error);
/* Full segment transferred: advance and continue with the next iov. */
1625 } else if (error == len) {
1626 error = uiomove(NULL, error, uio);
/* Short transfer: advance by the actual count and stop. */
1630 error = uiomove(NULL, error, uio);
1634 cuse_cmd_unlock(pccmd);
1636 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1638 if (error == EWOULDBLOCK)
1639 cuse_client_kqfilter_poll(dev, pcc);
/*
 * ioctl(2) handler: stage IOC_IN data into the client's bounce buffer,
 * forward CUSE_CMD_IOCTL to the userland server, then copy IOC_OUT
 * data back.  Zero-length ioctls pass the raw data word through instead
 * of a buffer pointer (see comment below).
 */
1645 cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
1646 caddr_t data, int fflag, struct thread *td)
1648 struct cuse_client_command *pccmd;
1649 struct cuse_client *pcc;
1653 error = cuse_client_get(&pcc);
/* Reject argument sizes beyond the bounce buffer. */
1657 len = IOCPARM_LEN(cmd);
1658 if (len > CUSE_BUFFER_MAX)
1661 pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
1663 cuse_cmd_lock(pccmd);
1665 if (cmd & (IOC_IN | IOC_VOID))
1666 memcpy(pcc->ioctl_buffer, data, len);
1669 * When the ioctl-length is zero drivers can pass information
1670 * through the data pointer of the ioctl. Make sure this information
1671 * is forwarded to the driver.
1675 cuse_client_send_command_locked(pccmd,
1676 (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
1677 (unsigned long)cmd, pcc->fflags,
1678 (fflag & O_NONBLOCK) ? IO_NDELAY : 0);
1680 error = cuse_client_receive_command_locked(pccmd, data, len);
1684 error = cuse_convert_error(error);
/* Copy out the server's response for IOC_OUT requests. */
1690 memcpy(data, pcc->ioctl_buffer, len);
1692 cuse_cmd_unlock(pccmd);
1694 if (error == EWOULDBLOCK)
1695 cuse_client_kqfilter_poll(dev, pcc);
/*
 * poll(2) handler: translate requested events into CUSE_POLL_* bits,
 * selrecord() first to avoid losing wakeups, ask the userland server
 * (non-blocking) which conditions hold, and translate its answer back
 * into revents.  On failure, everything is reported ready since many
 * clients do not understand POLLNVAL.
 */
1701 cuse_client_poll(struct cdev *dev, int events, struct thread *td)
1703 struct cuse_client_command *pccmd;
1704 struct cuse_client *pcc;
1709 error = cuse_client_get(&pcc);
1715 if (events & (POLLPRI | POLLIN | POLLRDNORM))
1716 temp |= CUSE_POLL_READ;
1718 if (events & (POLLOUT | POLLWRNORM))
1719 temp |= CUSE_POLL_WRITE;
1721 if (events & POLLHUP)
1722 temp |= CUSE_POLL_ERROR;
1724 pccmd = &pcc->cmds[CUSE_CMD_POLL];
1726 cuse_cmd_lock(pccmd);
1728 /* Need to selrecord() first to not loose any events. */
1729 if (temp != 0 && td != NULL)
1730 selrecord(td, &pcc->server->selinfo);
/* IO_NDELAY: the server must answer without blocking. */
1733 cuse_client_send_command_locked(pccmd,
1734 0, temp, pcc->fflags, IO_NDELAY);
1736 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1739 cuse_cmd_unlock(pccmd);
/* On success "error" holds the server's CUSE_POLL_* bit mask. */
1745 if (error & CUSE_POLL_READ)
1746 revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
1747 if (error & CUSE_POLL_WRITE)
1748 revents |= (events & (POLLOUT | POLLWRNORM));
1749 if (error & CUSE_POLL_ERROR)
1750 revents |= (events & POLLHUP);
1755 /* XXX many clients don't understand POLLNVAL */
1756 return (events & (POLLHUP | POLLPRI | POLLIN |
1757 POLLRDNORM | POLLOUT | POLLWRNORM));
/*
 * mmap handler for client devices — same translation and ownership
 * checks as cuse_server_mmap(), keyed on the client's server.
 */
1761 cuse_client_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
1763 uint32_t page_nr = offset / PAGE_SIZE;
1764 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1765 struct cuse_memory *mem;
1766 struct cuse_server *pcs;
1767 struct cuse_client *pcc;
1771 if (alloc_nr >= CUSE_ALLOC_UNIT_MAX)
1774 error = cuse_client_get(&pcc);
1781 mem = &cuse_mem[alloc_nr];
1783 /* try to enforce slight ownership */
1784 if ((pcs != NULL) && (mem->owner != pcs)) {
1788 if (mem->virtaddr == NULL) {
/* Allocation still in progress — refuse mapping. */
1792 if (mem->virtaddr == NBUSY) {
1796 page_nr %= CUSE_ALLOC_PAGES_MAX;
1798 if (page_nr >= mem->page_count) {
1802 ptr = mem->virtaddr + (page_nr * PAGE_SIZE);
1805 *paddr = vtophys(ptr);
/* Detach a read knote from the server's knote list (locked variant). */
1811 cuse_client_kqfilter_read_detach(struct knote *kn)
1813 struct cuse_client *pcc;
1817 knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/* Detach a write knote from the server's knote list (locked variant). */
1822 cuse_client_kqfilter_write_detach(struct knote *kn)
1824 struct cuse_client *pcc;
1828 knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/* kqueue event test: readable when NEED_READ is flagged on the client. */
1833 cuse_client_kqfilter_read_event(struct knote *kn, long hint)
1835 struct cuse_client *pcc;
1837 mtx_assert(&cuse_mtx, MA_OWNED);
1840 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
/* kqueue event test: writable when NEED_WRITE is flagged on the client. */
1844 cuse_client_kqfilter_write_event(struct knote *kn, long hint)
1846 struct cuse_client *pcc;
1848 mtx_assert(&cuse_mtx, MA_OWNED);
1851 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
1855 cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
1857 struct cuse_client *pcc;
1858 struct cuse_server *pcs;
1861 error = cuse_client_get(&pcc);
1867 switch (kn->kn_filter) {
1869 pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
1871 kn->kn_fop = &cuse_client_kqfilter_read_ops;
1872 knlist_add(&pcs->selinfo.si_note, kn, 1);
1875 pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
1877 kn->kn_fop = &cuse_client_kqfilter_write_ops;
1878 knlist_add(&pcs->selinfo.si_note, kn, 1);
1887 cuse_client_kqfilter_poll(dev, pcc);