3 * Copyright (c) 2010-2017 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_compat.h"
29 #include <sys/stdint.h>
30 #include <sys/stddef.h>
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/linker_set.h>
38 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/unistd.h>
44 #include <sys/malloc.h>
49 #include <sys/rwlock.h>
50 #include <sys/queue.h>
51 #include <sys/fcntl.h>
53 #include <sys/vnode.h>
54 #include <sys/selinfo.h>
55 #include <sys/ptrace.h>
57 #include <machine/bus.h>
61 #include <vm/vm_object.h>
62 #include <vm/vm_page.h>
63 #include <vm/vm_pager.h>
65 #include <fs/cuse/cuse_defs.h>
66 #include <fs/cuse/cuse_ioctl.h>
68 MODULE_VERSION(cuse, 1);
71 * Prevent cuse4bsd.ko and cuse.ko from loading at the same time by
72 * declaring support for the cuse4bsd interface in cuse.ko:
74 MODULE_VERSION(cuse4bsd, 1);
77 FEATURE(cuse, "Userspace character devices");
/*
 * Per-client, per-command slot for one outstanding CUSE command
 * exchanged between a client thread and the userspace server.
 * NOTE(review): interior lines (and the closing brace) are missing from
 * this excerpt -- verify field list against the full source.
 */
84 struct cuse_client_command {
85 	TAILQ_ENTRY(cuse_client_command) entry;
86 	struct cuse_command sub;
89 	struct thread *entered;	/* thread currently serving this command, if any */
90 	struct cuse_client *client;	/* back-pointer to owning client */
91 	struct proc *proc_curr;	/* peer process for cross-process data copies */
99 TAILQ_ENTRY(cuse_memory) entry;
/*
 * One character device created by a CUSE server (kernel cdev plus the
 * server's userspace device handle).
 * NOTE(review): closing brace not visible in this excerpt.
 */
105 struct cuse_server_dev {
106 	TAILQ_ENTRY(cuse_server_dev) entry;
107 	struct cuse_server *server;	/* owning server */
108 	struct cdev *kern_dev;	/* kernel device node */
109 	struct cuse_dev *user_dev;	/* opaque userspace-side handle */
/*
 * NOTE(review): the "struct cuse_server {" header line is missing from
 * this excerpt; the fields below are its interior. Lists: pending
 * commands (head), devices (hdev), clients (hcli) and memory (hmem).
 */
113 	TAILQ_ENTRY(cuse_server) entry;
114 	TAILQ_HEAD(, cuse_client_command) head;
115 	TAILQ_HEAD(, cuse_server_dev) hdev;
116 	TAILQ_HEAD(, cuse_client) hcli;
117 	TAILQ_HEAD(, cuse_memory) hmem;
119 	struct selinfo selinfo;	/* poll/kqueue wakeup state */
/*
 * Per-open client state for a CUSE-backed character device.
 * NOTE(review): the "struct cuse_client {" header and closing brace are
 * not visible in this excerpt.
 */
126 	TAILQ_ENTRY(cuse_client) entry;
127 	TAILQ_ENTRY(cuse_client) entry_ref;
128 	struct cuse_client_command cmds[CUSE_CMD_MAX];	/* one slot per command type */
129 	struct cuse_server *server;
130 	struct cuse_server_dev *server_dev;
132 	uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);	/* bounce buffer for ioctl data */
134 	int	fflags;			/* file flags */
135 	int	cflags;			/* client flags */
136 #define	CUSE_CLI_IS_CLOSING 0x01
137 #define	CUSE_CLI_KNOTE_NEED_READ 0x02
138 #define	CUSE_CLI_KNOTE_NEED_WRITE 0x04
139 #define	CUSE_CLI_KNOTE_HAS_READ 0x08
140 #define	CUSE_CLI_KNOTE_HAS_WRITE 0x10
/* True when the client has begun tearing down. */
143 #define	CUSE_CLIENT_CLOSING(pcc) \
144     ((pcc)->cflags & CUSE_CLI_IS_CLOSING)
146 static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
148 static TAILQ_HEAD(, cuse_server) cuse_server_head;
149 static struct mtx cuse_mtx;
150 static struct cdev *cuse_dev;
151 static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
152 static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
154 static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
155 static void cuse_client_kqfilter_read_detach(struct knote *kn);
156 static void cuse_client_kqfilter_write_detach(struct knote *kn);
157 static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
158 static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
/* kqueue EVFILT_READ filter for CUSE clients.
 * NOTE(review): some initializer lines (e.g. .f_isfd) missing here. */
160 static struct filterops cuse_client_kqfilter_read_ops = {
162 	.f_detach = cuse_client_kqfilter_read_detach,
163 	.f_event = cuse_client_kqfilter_read_event,
/* kqueue EVFILT_WRITE filter for CUSE clients.
 * NOTE(review): some initializer lines missing from this excerpt. */
166 static struct filterops cuse_client_kqfilter_write_ops = {
168 	.f_detach = cuse_client_kqfilter_write_detach,
169 	.f_event = cuse_client_kqfilter_write_event,
172 static d_open_t cuse_client_open;
173 static d_close_t cuse_client_close;
174 static d_ioctl_t cuse_client_ioctl;
175 static d_read_t cuse_client_read;
176 static d_write_t cuse_client_write;
177 static d_poll_t cuse_client_poll;
178 static d_mmap_single_t cuse_client_mmap_single;
179 static d_kqfilter_t cuse_client_kqfilter;
/*
 * cdevsw for the dynamically created client device nodes.
 * D_TRACKCLOSE: every close() reaches cuse_client_close().
 */
181 static struct cdevsw cuse_client_devsw = {
182 	.d_version = D_VERSION,
183 	.d_open = cuse_client_open,
184 	.d_close = cuse_client_close,
185 	.d_ioctl = cuse_client_ioctl,
186 	.d_name = "cuse_client",
187 	.d_flags = D_TRACKCLOSE,
188 	.d_read = cuse_client_read,
189 	.d_write = cuse_client_write,
190 	.d_poll = cuse_client_poll,
191 	.d_mmap_single = cuse_client_mmap_single,
192 	.d_kqfilter = cuse_client_kqfilter,
195 static d_open_t cuse_server_open;
196 static d_close_t cuse_server_close;
197 static d_ioctl_t cuse_server_ioctl;
198 static d_read_t cuse_server_read;
199 static d_write_t cuse_server_write;
200 static d_poll_t cuse_server_poll;
201 static d_mmap_single_t cuse_server_mmap_single;
/*
 * cdevsw for the /dev/cuse control node opened by userspace servers.
 */
203 static struct cdevsw cuse_server_devsw = {
204 	.d_version = D_VERSION,
205 	.d_open = cuse_server_open,
206 	.d_close = cuse_server_close,
207 	.d_ioctl = cuse_server_ioctl,
208 	.d_name = "cuse_server",
209 	.d_flags = D_TRACKCLOSE,
210 	.d_read = cuse_server_read,
211 	.d_write = cuse_server_write,
212 	.d_poll = cuse_server_poll,
213 	.d_mmap_single = cuse_server_mmap_single,
216 static void cuse_client_is_closing(struct cuse_client *);
217 static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
228 mtx_unlock(&cuse_mtx);
/* Serialize use of one command slot (exclusive sx lock).
 * NOTE(review): return type and braces missing from this excerpt. */
232 cuse_cmd_lock(struct cuse_client_command *pccmd)
234 	sx_xlock(&pccmd->sx);
/* Release the command-slot lock taken by cuse_cmd_lock(). */
238 cuse_cmd_unlock(struct cuse_client_command *pccmd)
240 	sx_xunlock(&pccmd->sx);
/*
 * Module initialization: set up global state and create /dev/cuse.
 * Runs at SI_SUB_DEVFS via the SYSINIT below.
 */
244 cuse_kern_init(void *arg)
246 	TAILQ_INIT(&cuse_server_head);
248 	mtx_init(&cuse_mtx, "cuse-mtx", NULL, MTX_DEF);
250 	cuse_dev = make_dev(&cuse_server_devsw, 0,
251 	    UID_ROOT, GID_OPERATOR, 0600, "cuse");
253 	printf("Cuse v%d.%d.%d @ /dev/cuse\n",
254 	    (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
255 	    (CUSE_VERSION >> 0) & 0xFF);
257 SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, 0);
/*
 * Module teardown: wait for all servers to go away, then destroy
 * /dev/cuse and the global mutex.
 * NOTE(review): the loop structure around the drain is missing lines in
 * this excerpt -- verify against the full source.
 */
260 cuse_kern_uninit(void *arg)
266 		printf("Cuse: Please exit all /dev/cuse instances "
267 		    "and processes which have used this device.\n");
269 		pause("DRAIN", 2 * hz);	/* give userspace time to exit */
272 		ptr = TAILQ_FIRST(&cuse_server_head);
279 	if (cuse_dev != NULL)
280 		destroy_dev(cuse_dev);
282 	mtx_destroy(&cuse_mtx);
284 SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
/*
 * Fetch the per-open server state from cdevpriv; reject servers that
 * are already closing.
 * NOTE(review): error/return paths missing from this excerpt.
 */
287 cuse_server_get(struct cuse_server **ppcs)
289 	struct cuse_server *pcs;
292 	error = devfs_get_cdevpriv((void **)&pcs);
297 	/* check if closing */
299 	if (pcs->is_closing) {
/*
 * Mark a server as closing and propagate closing state to all of its
 * clients. NOTE(review): lines missing from this excerpt.
 */
310 cuse_server_is_closing(struct cuse_server *pcs)
312 	struct cuse_client *pcc;
319 	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
320 		cuse_client_is_closing(pcc);
/*
 * Find the command slot currently entered by thread "td" across all of
 * the server's non-closing clients; NULL if no match (fall-through path
 * not visible in this excerpt).
 */
324 static struct cuse_client_command *
325 cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
327 	struct cuse_client *pcc;
333 	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
334 		if (CUSE_CLIENT_CLOSING(pcc))
336 		for (n = 0; n != CUSE_CMD_MAX; n++) {
337 			if (pcc->cmds[n].entered == td)
338 				return (&pcc->cmds[n]);
/*
 * Sanitize a device name in place, allowing only [a-zA-Z0-9._/].
 * NOTE(review): the per-character action bodies are missing from this
 * excerpt -- only the classification tests are visible.
 */
346 cuse_str_filter(char *ptr)
350 	while (((c = *ptr) != 0)) {
352 		if ((c >= 'a') && (c <= 'z')) {
356 		if ((c >= 'A') && (c <= 'Z')) {
360 		if ((c >= '0') && (c <= '9')) {
364 		if ((c == '.') || (c == '_') || (c == '/')) {
/*
 * Map CUSE_ERR_* codes from the userspace server to kernel errno
 * values. NOTE(review): several case bodies missing from this excerpt.
 */
375 cuse_convert_error(int error)
383 	case CUSE_ERR_WOULDBLOCK:
384 		return (EWOULDBLOCK);
385 	case CUSE_ERR_INVALID:
387 	case CUSE_ERR_NO_MEMORY:
391 	case CUSE_ERR_SIGNAL:
393 	case CUSE_ERR_NO_DEVICE:
/*
 * Release a cuse_memory record: drop the VM object reference and free
 * the bookkeeping structure (free() call not visible in this excerpt).
 */
401 cuse_vm_memory_free(struct cuse_memory *mem)
403 	/* last user is gone - free */
404 	vm_object_deallocate(mem->object);
406 	/* free CUSE memory */
/*
 * Allocate a swap-backed VM object of "page_count" pages for mmap by
 * clients, keyed by "alloc_nr". Fails if the allocation number is
 * already in use (duplicate path partially missing from this excerpt).
 */
411 cuse_server_alloc_memory(struct cuse_server *pcs, uint32_t alloc_nr,
414 	struct cuse_memory *temp;
415 	struct cuse_memory *mem;
419 	mem = malloc(sizeof(*mem), M_CUSE, M_WAITOK | M_ZERO);
423 	object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * page_count,
424 	    VM_PROT_DEFAULT, 0, curthread->td_ucred);
425 	if (object == NULL) {
431 	/* check if allocation number already exists */
432 	TAILQ_FOREACH(temp, &pcs->hmem, entry) {
433 		if (temp->alloc_nr == alloc_nr)
441 	mem->object = object;
442 	mem->page_count = page_count;
443 	mem->alloc_nr = alloc_nr;
444 	TAILQ_INSERT_TAIL(&pcs->hmem, mem, entry);
/* error path: undo the VM object allocation */
450 	vm_object_deallocate(object);
/*
 * Find the memory record for "alloc_nr", unlink it from the server and
 * free it. NOTE(review): not-found handling missing from this excerpt.
 */
457 cuse_server_free_memory(struct cuse_server *pcs, uint32_t alloc_nr)
459 	struct cuse_memory *mem;
462 	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
463 		if (mem->alloc_nr == alloc_nr)
470 	TAILQ_REMOVE(&pcs->hmem, mem, entry);
473 	cuse_vm_memory_free(mem);
/*
 * Fetch per-open client state from cdevpriv; reject clients whose
 * client or server side is closing.
 * NOTE(review): error/return paths missing from this excerpt.
 */
479 cuse_client_get(struct cuse_client **ppcc)
481 	struct cuse_client *pcc;
484 	/* try to get private data */
485 	error = devfs_get_cdevpriv((void **)&pcc);
490 	/* check if closing */
492 	if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
/*
 * Mark a client as closing: set the flag, detach it from its device,
 * pull any queued commands off the server queue and wake waiters.
 * Idempotent -- returns early if already closing.
 */
503 cuse_client_is_closing(struct cuse_client *pcc)
505 	struct cuse_client_command *pccmd;
508 	if (CUSE_CLIENT_CLOSING(pcc))
511 	pcc->cflags |= CUSE_CLI_IS_CLOSING;
512 	pcc->server_dev = NULL;
514 	for (n = 0; n != CUSE_CMD_MAX; n++) {
516 		pccmd = &pcc->cmds[n];
/* tqe_prev != NULL means the command is still queued on the server */
518 		if (pccmd->entry.tqe_prev != NULL) {
519 			TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
520 			pccmd->entry.tqe_prev = NULL;
522 		cv_broadcast(&pccmd->cv);
/*
 * Fill in a command slot (translating kernel fflags/ioflag into
 * CUSE_FFLAG_* bits) and enqueue it on the server's command queue,
 * unless the command is already queued or either side is closing.
 * Caller holds the relevant lock (function is *_locked).
 */
527 cuse_client_send_command_locked(struct cuse_client_command *pccmd,
528     uintptr_t data_ptr, unsigned long arg, int fflags, int ioflag)
530 	unsigned long cuse_fflags = 0;
531 	struct cuse_server *pcs;
534 		cuse_fflags |= CUSE_FFLAG_READ;
537 		cuse_fflags |= CUSE_FFLAG_WRITE;
539 	if (ioflag & IO_NDELAY)
540 		cuse_fflags |= CUSE_FFLAG_NONBLOCK;
542 	pccmd->sub.fflags = cuse_fflags;
543 	pccmd->sub.data_pointer = data_ptr;
544 	pccmd->sub.argument = arg;
546 	pcs = pccmd->client->server;
/* only enqueue if not already queued and nobody is closing */
548 	if ((pccmd->entry.tqe_prev == NULL) &&
549 	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
550 	    (pcs->is_closing == 0)) {
551 		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/*
 * Record that the waiting thread caught a signal and enqueue the
 * client's dedicated CUSE_CMD_SIGNAL slot so the server is notified.
 */
557 cuse_client_got_signal(struct cuse_client_command *pccmd)
559 	struct cuse_server *pcs;
561 	pccmd->got_signal = 1;
/* switch to the dedicated signal-command slot */
563 	pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];
565 	pcs = pccmd->client->server;
567 	if ((pccmd->entry.tqe_prev == NULL) &&
568 	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
569 	    (pcs->is_closing == 0)) {
570 		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/*
 * Sleep until the userspace server completes the current command, then
 * collect its status. Interruptible waits report signals to the server
 * via cuse_client_got_signal(). Before returning, waits for any
 * in-flight cross-process copies referencing this slot to drain.
 * NOTE(review): several lines missing from this excerpt -- verify
 * control flow against the full source.
 */
576 cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
577     uint8_t *arg_ptr, uint32_t arg_len)
583 	pccmd->proc_curr = curthread->td_proc;
585 	if (CUSE_CLIENT_CLOSING(pccmd->client) ||
586 	    pccmd->client->server->is_closing) {
587 		error = CUSE_ERR_OTHER;
590 	while (pccmd->command == CUSE_CMD_NONE) {
592 			cv_wait(&pccmd->cv, &cuse_mtx);
594 			error = cv_wait_sig(&pccmd->cv, &cuse_mtx);
597 				cuse_client_got_signal(pccmd);
599 		if (CUSE_CLIENT_CLOSING(pccmd->client) ||
600 		    pccmd->client->server->is_closing) {
601 			error = CUSE_ERR_OTHER;
606 	error = pccmd->error;
607 	pccmd->command = CUSE_CMD_NONE;
608 	cv_signal(&pccmd->cv);
612 	/* wait until all process references are gone */
614 	pccmd->proc_curr = NULL;
616 	while (pccmd->proc_refs != 0)
617 		cv_wait(&pccmd->cv, &cuse_mtx);
622 /*------------------------------------------------------------------------*
624 *------------------------------------------------------------------------*/
/*
 * Tear down one server-created device: detach it from the cdev, close
 * all clients opened against it, then destroy the cdev synchronously.
 */
627 cuse_server_free_dev(struct cuse_server_dev *pcsd)
629 	struct cuse_server *pcs;
630 	struct cuse_client *pcc;
632 	/* get server pointer */
635 	/* prevent creation of more devices */
637 	if (pcsd->kern_dev != NULL)
638 		pcsd->kern_dev->si_drv1 = NULL;
640 	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
641 		if (pcc->server_dev == pcsd)
642 			cuse_client_is_closing(pcc);
646 	/* destroy device, if any */
647 	if (pcsd->kern_dev != NULL) {
648 		/* destroy device synchronously */
649 		destroy_dev(pcsd->kern_dev);
/*
 * Drop one reference on the server; on the last reference, fully tear
 * it down: close clients, unlink from the global list, free unit IDs,
 * devices and memory mappings, and destroy kqueue/select/cv state.
 * NOTE(review): refcount decrement and lock transitions are missing
 * lines in this excerpt.
 */
655 cuse_server_unref(struct cuse_server *pcs)
657 	struct cuse_server_dev *pcsd;
658 	struct cuse_memory *mem;
662 	if (pcs->refs != 0) {
666 	cuse_server_is_closing(pcs);
667 	/* final client wakeup, if any */
668 	cuse_server_wakeup_all_client_locked(pcs);
670 	TAILQ_REMOVE(&cuse_server_head, pcs, entry);
/* -1 frees every unit ID owned by this server */
672 	cuse_free_unit_by_id_locked(pcs, -1);
674 	while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
675 		TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
677 		cuse_server_free_dev(pcsd);
681 	while ((mem = TAILQ_FIRST(&pcs->hmem)) != NULL) {
682 		TAILQ_REMOVE(&pcs->hmem, mem, entry);
684 		cuse_vm_memory_free(mem);
688 	knlist_clear(&pcs->selinfo.si_note, 1);
689 	knlist_destroy(&pcs->selinfo.si_note);
693 	seldrain(&pcs->selinfo);
695 	cv_destroy(&pcs->cv);
/* cdevpriv destructor for /dev/cuse: drop the open's server reference. */
701 cuse_server_free(void *arg)
703 	struct cuse_server *pcs = arg;
706 	cuse_server_unref(pcs);
/*
 * d_open for /dev/cuse: allocate per-open server state, attach it as
 * cdevpriv with cuse_server_free() as destructor, record the opening
 * PID (used later to reject same-process client opens) and link the
 * server into the global list.
 */
710 cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
712 	struct cuse_server *pcs;
714 	pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO);
718 	if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
719 		printf("Cuse: Cannot set cdevpriv.\n");
723 	/* store current process ID */
724 	pcs->pid = curproc->p_pid;
726 	TAILQ_INIT(&pcs->head);
727 	TAILQ_INIT(&pcs->hdev);
728 	TAILQ_INIT(&pcs->hcli);
729 	TAILQ_INIT(&pcs->hmem);
731 	cv_init(&pcs->cv, "cuse-server-cv");
733 	knlist_init_mtx(&pcs->selinfo.si_note, &cuse_mtx);
737 	TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
/*
 * d_close for /dev/cuse: mark the server closing, wake its clients and
 * clear any pending knotes. Final cleanup happens via the cdevpriv
 * destructor (cuse_server_free).
 */
744 cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
746 	struct cuse_server *pcs;
749 	error = cuse_server_get(&pcs);
754 	cuse_server_is_closing(pcs);
755 	/* final client wakeup, if any */
756 	cuse_server_wakeup_all_client_locked(pcs);
758 	knlist_clear(&pcs->selinfo.si_note, 1);
/* d_read entry for /dev/cuse.
 * NOTE(review): body not visible in this excerpt. */
766 cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
/* d_write entry for /dev/cuse.
 * NOTE(review): body not visible in this excerpt. */
772 cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
/*
 * Copy ioctl payload between the server process and the client's
 * kernel-side ioctl_buffer. The peer pointer is a synthetic offset in
 * [CUSE_BUF_MIN_PTR, CUSE_BUF_MAX_PTR); all bounds are validated
 * against CUSE_BUFFER_MAX before touching the buffer.
 * NOTE(review): the proc-reference accounting and the copy invocation
 * around the arguments at lines "809"-"815" are missing lines here.
 */
778 cuse_server_ioctl_copy_locked(struct cuse_client_command *pccmd,
779     struct cuse_data_chunk *pchk, int isread)
785 	offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;
787 	if (pchk->length > CUSE_BUFFER_MAX)
790 	if (offset >= CUSE_BUFFER_MAX)
793 	if ((offset + pchk->length) > CUSE_BUFFER_MAX)
796 	p_proc = pccmd->proc_curr;
800 	if (pccmd->proc_refs < 0)
809 		    (void *)pchk->local_ptr,
810 		    pccmd->client->ioctl_buffer + offset,
814 		    pccmd->client->ioctl_buffer + offset,
815 		    (void *)pchk->local_ptr,
823 	if (pccmd->proc_curr == NULL)
824 		cv_signal(&pccmd->cv);
/*
 * Copy "len" bytes from (proc_s, data_s) to (proc_d, data_d). One side
 * must be the current process; the remote side is accessed with
 * proc_rwmem() using a uio aimed at the remote address.
 * NOTE(review): several uio initializer lines and the fallthrough case
 * (neither proc current) are missing from this excerpt.
 */
830 cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
831     struct proc *proc_d, vm_offset_t data_d, size_t len)
834 	struct proc *proc_cur;
838 	proc_cur = td->td_proc;
840 	if (proc_cur == proc_d) {
/* destination is local: read from the source process */
842 			.iov_base = (caddr_t)data_d,
848 			.uio_offset = (off_t)data_s,
850 			.uio_segflg = UIO_USERSPACE,
856 		error = proc_rwmem(proc_s, &uio);
859 	} else if (proc_cur == proc_s) {
/* source is local: write into the destination process */
861 			.iov_base = (caddr_t)data_s,
867 			.uio_offset = (off_t)data_d,
869 			.uio_segflg = UIO_USERSPACE,
875 		error = proc_rwmem(proc_d, &uio);
/*
 * Copy a data chunk directly between the server process (current) and
 * the client's peer process using cuse_proc2proc_copy(); used for
 * read/write payloads outside the ioctl bounce buffer range.
 * NOTE(review): proc-reference accounting lines missing here.
 */
884 cuse_server_data_copy_locked(struct cuse_client_command *pccmd,
885     struct cuse_data_chunk *pchk, int isread)
890 	p_proc = pccmd->proc_curr;
894 	if (pccmd->proc_refs < 0)
902 		error = cuse_proc2proc_copy(
903 		    curthread->td_proc, pchk->local_ptr,
904 		    p_proc, pchk->peer_ptr,
907 		error = cuse_proc2proc_copy(
908 		    p_proc, pchk->peer_ptr,
909 		    curthread->td_proc, pchk->local_ptr,
/* wake a peer that is waiting for the references to drain */
917 	if (pccmd->proc_curr == NULL)
918 		cv_signal(&pccmd->cv);
/*
 * Allocate a device unit number for "id": scan existing allocations to
 * find a free sub-unit within the same CUSE_ID_MASK class, then claim
 * the first empty global slot.
 * NOTE(review): lines missing around "x" computation and return values
 * -- verify against the full source.
 */
924 cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
931 	for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
932 		if (cuse_alloc_unit[n] != NULL) {
933 			if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
935 			if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
944 	for (n = 0; n != CUSE_DEVICES_MAX; n++) {
945 		if (cuse_alloc_unit[n] == NULL) {
946 			cuse_alloc_unit[n] = pcs;
947 			cuse_alloc_unit_id[n] = id | x;
/* Wake poll/select and kqueue waiters on the server's selinfo. */
956 cuse_server_wakeup_locked(struct cuse_server *pcs)
958 	selwakeup(&pcs->selinfo);
959 	KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
/*
 * Flag every client as needing both read and write re-polls, then wake
 * the server's pollers/knotes.
 */
963 cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
965 	struct cuse_client *pcc;
967 	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
968 		pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
969 		    CUSE_CLI_KNOTE_NEED_WRITE);
971 	cuse_server_wakeup_locked(pcs);
/*
 * Release unit number "id" owned by server "pcs"; id == -1 releases all
 * of the server's units. Returns 0 if something was freed, else EINVAL.
 */
975 cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
980 	for (n = 0; n != CUSE_DEVICES_MAX; n++) {
981 		if (cuse_alloc_unit[n] == pcs) {
982 			if (cuse_alloc_unit_id[n] == id || id == -1) {
983 				cuse_alloc_unit[n] = NULL;
984 				cuse_alloc_unit_id[n] = 0;
990 	return (found ? 0 : EINVAL);
/*
 * d_ioctl for /dev/cuse: the control interface the userspace server
 * uses to fetch pending commands, report results, manage unit numbers,
 * shared memory, device creation/destruction, and data transfers.
 * NOTE(review): this listing is missing many lines (locking calls,
 * break statements, some assignments) -- verify each case against the
 * full source before relying on details.
 */
994 cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
995     caddr_t data, int fflag, struct thread *td)
997 	struct cuse_server *pcs;
1000 	error = cuse_server_get(&pcs);
1005 		struct cuse_client_command *pccmd;
1006 		struct cuse_client *pcc;
1007 		struct cuse_command *pcmd;
1008 		struct cuse_alloc_info *pai;
1009 		struct cuse_create_dev *pcd;
1010 		struct cuse_server_dev *pcsd;
1011 		struct cuse_data_chunk *pchk;
	/* Dequeue the next pending client command (blocks until one arrives). */
1014 	case CUSE_IOCTL_GET_COMMAND:
1015 		pcmd = (void *)data;
1019 		while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
1020 			error = cv_wait_sig(&pcs->cv, &cuse_mtx);
1022 			if (pcs->is_closing)
1031 		TAILQ_REMOVE(&pcs->head, pccmd, entry);
1032 		pccmd->entry.tqe_prev = NULL;
1034 		pccmd->entered = curthread;
	/* Report completion status for the command this thread entered. */
1042 	case CUSE_IOCTL_SYNC_COMMAND:
1045 		while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {
1047 			/* send sync command */
1048 			pccmd->entered = NULL;
1049 			pccmd->error = *(int *)data;
1050 			pccmd->command = CUSE_CMD_SYNC;
1052 			/* signal peer, if any */
1053 			cv_signal(&pccmd->cv);
	/* Unit number management. */
1059 	case CUSE_IOCTL_ALLOC_UNIT:
1062 		n = cuse_alloc_unit_by_id_locked(pcs,
1063 		    CUSE_ID_DEFAULT(0));
1072 	case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
1076 		n = (n & CUSE_ID_MASK);
1079 		n = cuse_alloc_unit_by_id_locked(pcs, n);
1088 	case CUSE_IOCTL_FREE_UNIT:
1092 		n = CUSE_ID_DEFAULT(n);
1095 		error = cuse_free_unit_by_id_locked(pcs, n);
1099 	case CUSE_IOCTL_FREE_UNIT_BY_ID:
1104 		error = cuse_free_unit_by_id_locked(pcs, n);
	/* Shared-memory management (bounds-checked before allocation). */
1108 	case CUSE_IOCTL_ALLOC_MEMORY:
1112 		if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1116 		if (pai->page_count >= CUSE_ALLOC_PAGES_MAX) {
1120 		error = cuse_server_alloc_memory(pcs,
1121 		    pai->alloc_nr, pai->page_count);
1124 	case CUSE_IOCTL_FREE_MEMORY:
1127 		if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1131 		error = cuse_server_free_memory(pcs, pai->alloc_nr);
	/* Fetch-and-clear the got_signal flag for the current command. */
1134 	case CUSE_IOCTL_GET_SIG:
1137 		pccmd = cuse_server_find_command(pcs, curthread);
1139 		if (pccmd != NULL) {
1140 			n = pccmd->got_signal;
1141 			pccmd->got_signal = 0;
	/* Set the per-file handle for every command slot of the client. */
1151 	case CUSE_IOCTL_SET_PFH:
1154 		pccmd = cuse_server_find_command(pcs, curthread);
1156 		if (pccmd != NULL) {
1157 			pcc = pccmd->client;
1158 			for (n = 0; n != CUSE_CMD_MAX; n++) {
1159 				pcc->cmds[n].sub.per_file_handle = *(uintptr_t *)data;
	/* Create a character device (requires PRIV_DRIVER; name sanitized). */
1167 	case CUSE_IOCTL_CREATE_DEV:
1169 		error = priv_check(curthread, PRIV_DRIVER);
1177 		pcd->devname[sizeof(pcd->devname) - 1] = 0;
1179 		if (pcd->devname[0] == 0) {
1183 		cuse_str_filter(pcd->devname);
1185 		pcd->permissions &= 0777;
1187 		/* try to allocate a character device */
1189 		pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);
1197 		pcsd->user_dev = pcd->dev;
1199 		pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
1200 		    &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
1201 		    pcd->permissions, "%s", pcd->devname);
1203 		if (pcsd->kern_dev == NULL) {
1208 		pcsd->kern_dev->si_drv1 = pcsd;
1211 		TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
	/* Destroy a previously created device (requires PRIV_DRIVER). */
1216 	case CUSE_IOCTL_DESTROY_DEV:
1218 		error = priv_check(curthread, PRIV_DRIVER);
1226 		pcsd = TAILQ_FIRST(&pcs->hdev);
1227 		while (pcsd != NULL) {
1228 			if (pcsd->user_dev == *(struct cuse_dev **)data) {
1229 				TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
1231 				cuse_server_free_dev(pcsd);
/* list may have changed while unlocked: restart the scan */
1234 				pcsd = TAILQ_FIRST(&pcs->hdev);
1236 				pcsd = TAILQ_NEXT(pcsd, entry);
	/* Move payload data for the command this server thread is serving. */
1243 	case CUSE_IOCTL_WRITE_DATA:
1244 	case CUSE_IOCTL_READ_DATA:
1247 		pchk = (struct cuse_data_chunk *)data;
1249 		pccmd = cuse_server_find_command(pcs, curthread);
1251 		if (pccmd == NULL) {
1252 			error = ENXIO;	/* invalid request */
1253 		} else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
1254 			error = EFAULT;	/* NULL pointer */
1255 		} else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
1256 			error = cuse_server_ioctl_copy_locked(pccmd,
1257 			    pchk, cmd == CUSE_IOCTL_READ_DATA);
1259 			error = cuse_server_data_copy_locked(pccmd,
1260 			    pchk, cmd == CUSE_IOCTL_READ_DATA);
1265 	case CUSE_IOCTL_SELWAKEUP:
1268 		 * We don't know which direction caused the event.
1271 		cuse_server_wakeup_all_client_locked(pcs);
/* d_poll for /dev/cuse: the server node always reports ready for all
 * requested events (actual waiting is done in GET_COMMAND). */
1283 cuse_server_poll(struct cdev *dev, int events, struct thread *td)
1285 	return (events & (POLLHUP | POLLPRI | POLLIN |
1286 	    POLLRDNORM | POLLOUT | POLLWRNORM));
/*
 * d_mmap_single for /dev/cuse: map a server-allocated memory region.
 * The file offset encodes allocation number (offset / PAGE_SIZE /
 * CUSE_ALLOC_PAGES_MAX) and page index within it; both the page offset
 * and the mapping size are validated before handing out a referenced
 * VM object. NOTE(review): error-return lines missing in this excerpt.
 */
1290 cuse_server_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1291     vm_size_t size, struct vm_object **object, int nprot)
1293 	uint32_t page_nr = *offset / PAGE_SIZE;
1294 	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1295 	struct cuse_memory *mem;
1296 	struct cuse_server *pcs;
1299 	error = cuse_server_get(&pcs);
1304 	/* lookup memory structure */
1305 	TAILQ_FOREACH(mem, &pcs->hmem, entry) {
1306 		if (mem->alloc_nr == alloc_nr)
1313 	/* verify page offset */
1314 	page_nr %= CUSE_ALLOC_PAGES_MAX;
1315 	if (page_nr >= mem->page_count) {
1319 	/* verify mmap size */
1320 	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
1321 	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
1325 	vm_object_reference(mem->object);
1326 	*object = mem->object;
1329 	/* set new VM object offset to use */
1330 	*offset = page_nr * PAGE_SIZE;
1336 /*------------------------------------------------------------------------*
1338 *------------------------------------------------------------------------*/
/*
 * cdevpriv destructor for a client open: mark it closing, unlink from
 * the server's client list, destroy per-command locks/CVs, free the
 * structure (free() not visible here) and drop the server reference.
 */
1340 cuse_client_free(void *arg)
1342 	struct cuse_client *pcc = arg;
1343 	struct cuse_client_command *pccmd;
1344 	struct cuse_server *pcs;
1348 	cuse_client_is_closing(pcc);
1349 	TAILQ_REMOVE(&pcc->server->hcli, pcc, entry);
1352 	for (n = 0; n != CUSE_CMD_MAX; n++) {
1354 		pccmd = &pcc->cmds[n];
1356 		sx_destroy(&pccmd->sx);
1357 		cv_destroy(&pccmd->cv);
1364 	/* drop reference on server */
1365 	cuse_server_unref(pcs);
/*
 * d_open for a CUSE-created device node: locate the backing server via
 * si_drv1, guard against refcount wrap and against the server process
 * opening its own device (deadlock risk), set up per-open client state
 * and command slots, enqueue the client on the server, then send
 * CUSE_CMD_OPEN to the userspace server and wait for its answer.
 * NOTE(review): many lines (locking, refcount bump, error returns) are
 * missing from this excerpt.
 */
1369 cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1371 	struct cuse_client_command *pccmd;
1372 	struct cuse_server_dev *pcsd;
1373 	struct cuse_client *pcc;
1374 	struct cuse_server *pcs;
1375 	struct cuse_dev *pcd;
1380 	pcsd = dev->si_drv1;
1383 		pcd = pcsd->user_dev;
1385 	 * Check that the refcount didn't wrap and that the
1386 	 * same process is not both client and server. This
1387 	 * can easily lead to deadlocks when destroying the
1388 	 * CUSE character device nodes:
1391 	if (pcs->refs < 0 || pcs->pid == curproc->p_pid) {
1392 		/* overflow or wrong PID */
1405 	pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
/* error path: undo the server reference taken above */
1407 		/* drop reference on server */
1408 		cuse_server_unref(pcs);
1411 	if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1412 		printf("Cuse: Cannot set cdevpriv.\n");
1413 		/* drop reference on server */
1414 		cuse_server_unref(pcs);
1418 	pcc->fflags = fflags;
1419 	pcc->server_dev = pcsd;
1422 	for (n = 0; n != CUSE_CMD_MAX; n++) {
1424 		pccmd = &pcc->cmds[n];
1426 		pccmd->sub.dev = pcd;
1427 		pccmd->sub.command = n;
1428 		pccmd->client = pcc;
1430 		sx_init(&pccmd->sx, "cuse-client-sx");
1431 		cv_init(&pccmd->cv, "cuse-client-cv");
1436 	/* cuse_client_free() assumes that the client is listed somewhere! */
1437 	/* always enqueue */
1439 	TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1441 	/* check if server is closing */
1442 	if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1450 		devfs_clear_cdevpriv();	/* XXX bugfix */
/* forward the open() to the userspace server */
1453 	pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1455 	cuse_cmd_lock(pccmd);
1458 	cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1460 	error = cuse_client_receive_command_locked(pccmd, 0, 0);
1464 		error = cuse_convert_error(error);
1469 	cuse_cmd_unlock(pccmd);
1472 		devfs_clear_cdevpriv();	/* XXX bugfix */
/*
 * d_close for a CUSE device: forward CUSE_CMD_CLOSE to the userspace
 * server, wait for completion, then mark the client closing. Final
 * cleanup is done by the cdevpriv destructor (cuse_client_free).
 */
1478 cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
1480 	struct cuse_client_command *pccmd;
1481 	struct cuse_client *pcc;
1484 	error = cuse_client_get(&pcc);
1488 	pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
1490 	cuse_cmd_lock(pccmd);
1493 	cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1495 	error = cuse_client_receive_command_locked(pccmd, 0, 0);
1498 	cuse_cmd_unlock(pccmd);
1501 	cuse_client_is_closing(pcc);
/*
 * Refresh kqueue readiness state for a client: if any knotes are
 * attached, re-poll the server and set NEED_READ/NEED_WRITE flags
 * accordingly, then wake the knotes.
 * NOTE(review): locking and conditional lines missing in this excerpt.
 */
1508 cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
1513 	temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
1514 	    CUSE_CLI_KNOTE_HAS_WRITE));
1515 	pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
1516 	    CUSE_CLI_KNOTE_NEED_WRITE);
1520 		/* get the latest polling state from the server */
1521 		temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);
1523 		if (temp & (POLLIN | POLLOUT)) {
1526 				pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
1528 				pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;
1530 			/* make sure the "knote" gets woken up */
1531 			cuse_server_wakeup_locked(pcc->server);
/*
 * d_read for a CUSE device: for each iovec (bounded by
 * CUSE_LENGTH_MAX) send CUSE_CMD_READ to the userspace server; the
 * server copies the data directly into the caller's buffer, so uiomove
 * is called with NULL/UIO_NOCOPY just to advance the uio. On
 * EWOULDBLOCK, refresh kqueue state so pollers are re-armed.
 * NOTE(review): loop-termination lines are missing from this excerpt.
 */
1538 cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
1540 	struct cuse_client_command *pccmd;
1541 	struct cuse_client *pcc;
1545 	error = cuse_client_get(&pcc);
1549 	pccmd = &pcc->cmds[CUSE_CMD_READ];
1551 	if (uio->uio_segflg != UIO_USERSPACE) {
1554 	uio->uio_segflg = UIO_NOCOPY;
1556 	cuse_cmd_lock(pccmd);
1558 	while (uio->uio_resid != 0) {
1560 		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1564 		len = uio->uio_iov->iov_len;
1567 		cuse_client_send_command_locked(pccmd,
1568 		    (uintptr_t)uio->uio_iov->iov_base,
1569 		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1571 		error = cuse_client_receive_command_locked(pccmd, 0, 0);
/* negative status -> errno; positive status = byte count transferred */
1575 			error = cuse_convert_error(error);
1577 		} else if (error == len) {
1578 			error = uiomove(NULL, error, uio);
1582 			error = uiomove(NULL, error, uio);
1586 	cuse_cmd_unlock(pccmd);
1588 	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1590 	if (error == EWOULDBLOCK)
1591 		cuse_client_kqfilter_poll(dev, pcc);
/*
 * d_write for a CUSE device: mirror image of cuse_client_read() using
 * CUSE_CMD_WRITE. The server pulls the data from the caller's buffer;
 * uiomove(NULL, ...) with UIO_NOCOPY only advances the uio.
 * NOTE(review): loop-termination lines are missing from this excerpt.
 */
1597 cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
1599 	struct cuse_client_command *pccmd;
1600 	struct cuse_client *pcc;
1604 	error = cuse_client_get(&pcc);
1608 	pccmd = &pcc->cmds[CUSE_CMD_WRITE];
1610 	if (uio->uio_segflg != UIO_USERSPACE) {
1613 	uio->uio_segflg = UIO_NOCOPY;
1615 	cuse_cmd_lock(pccmd);
1617 	while (uio->uio_resid != 0) {
1619 		if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1623 		len = uio->uio_iov->iov_len;
1626 		cuse_client_send_command_locked(pccmd,
1627 		    (uintptr_t)uio->uio_iov->iov_base,
1628 		    (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1630 		error = cuse_client_receive_command_locked(pccmd, 0, 0);
/* negative status -> errno; positive status = byte count transferred */
1634 			error = cuse_convert_error(error);
1636 		} else if (error == len) {
1637 			error = uiomove(NULL, error, uio);
1641 			error = uiomove(NULL, error, uio);
1645 	cuse_cmd_unlock(pccmd);
1647 	uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1649 	if (error == EWOULDBLOCK)
1650 		cuse_client_kqfilter_poll(dev, pcc);
/*
 * d_ioctl for a CUSE device: stage IOC_IN data into the client's
 * kernel bounce buffer, forward CUSE_CMD_IOCTL to the userspace server
 * (zero-length ioctls pass the raw data word instead of a buffer
 * pointer), then copy IOC_OUT data back (the copy-out condition line is
 * missing from this excerpt).
 */
1656 cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
1657     caddr_t data, int fflag, struct thread *td)
1659 	struct cuse_client_command *pccmd;
1660 	struct cuse_client *pcc;
1664 	error = cuse_client_get(&pcc);
1668 	len = IOCPARM_LEN(cmd);
1669 	if (len > CUSE_BUFFER_MAX)
1672 	pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
1674 	cuse_cmd_lock(pccmd);
1676 	if (cmd & (IOC_IN | IOC_VOID))
1677 		memcpy(pcc->ioctl_buffer, data, len);
1680 	 * When the ioctl-length is zero drivers can pass information
1681 	 * through the data pointer of the ioctl. Make sure this information
1682 	 * is forwarded to the driver.
1686 	cuse_client_send_command_locked(pccmd,
1687 	    (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
1688 	    (unsigned long)cmd, pcc->fflags,
1689 	    (fflag & O_NONBLOCK) ? IO_NDELAY : 0);
1691 	error = cuse_client_receive_command_locked(pccmd, data, len);
1695 		error = cuse_convert_error(error);
1701 		memcpy(data, pcc->ioctl_buffer, len);
1703 	cuse_cmd_unlock(pccmd);
1705 	if (error == EWOULDBLOCK)
1706 		cuse_client_kqfilter_poll(dev, pcc);
/*
 * d_poll for a CUSE device: translate poll events to CUSE_POLL_* bits,
 * selrecord() before querying (to avoid losing wakeups), forward
 * CUSE_CMD_POLL non-blocking to the server and translate the answer
 * back. On server error the catch-all return reports everything ready
 * because many clients don't understand POLLNVAL.
 * NOTE(review): error-branch lines missing from this excerpt.
 */
1712 cuse_client_poll(struct cdev *dev, int events, struct thread *td)
1714 	struct cuse_client_command *pccmd;
1715 	struct cuse_client *pcc;
1720 	error = cuse_client_get(&pcc);
1726 	if (events & (POLLPRI | POLLIN | POLLRDNORM))
1727 		temp |= CUSE_POLL_READ;
1729 	if (events & (POLLOUT | POLLWRNORM))
1730 		temp |= CUSE_POLL_WRITE;
1732 	if (events & POLLHUP)
1733 		temp |= CUSE_POLL_ERROR;
1735 	pccmd = &pcc->cmds[CUSE_CMD_POLL];
1737 	cuse_cmd_lock(pccmd);
1739 	/* Need to selrecord() first to not loose any events. */
1740 	if (temp != 0 && td != NULL)
1741 		selrecord(td, &pcc->server->selinfo);
1744 	cuse_client_send_command_locked(pccmd,
1745 	    0, temp, pcc->fflags, IO_NDELAY);
1747 	error = cuse_client_receive_command_locked(pccmd, 0, 0);
1750 	cuse_cmd_unlock(pccmd);
/* translate the server's CUSE_POLL_* answer back to poll events */
1756 		if (error & CUSE_POLL_READ)
1757 			revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
1758 		if (error & CUSE_POLL_WRITE)
1759 			revents |= (events & (POLLOUT | POLLWRNORM));
1760 		if (error & CUSE_POLL_ERROR)
1761 			revents |= (events & POLLHUP);
1766 	/* XXX many clients don't understand POLLNVAL */
1767 	return (events & (POLLHUP | POLLPRI | POLLIN |
1768 	    POLLRDNORM | POLLOUT | POLLWRNORM));
/*
 * d_mmap_single for a CUSE client device: same offset-to-allocation
 * decoding and validation as cuse_server_mmap_single(), but the memory
 * list is looked up via the client's server.
 * NOTE(review): error-return lines missing from this excerpt.
 */
1772 cuse_client_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1773     vm_size_t size, struct vm_object **object, int nprot)
1775 	uint32_t page_nr = *offset / PAGE_SIZE;
1776 	uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1777 	struct cuse_memory *mem;
1778 	struct cuse_client *pcc;
1781 	error = cuse_client_get(&pcc);
1786 	/* lookup memory structure */
1787 	TAILQ_FOREACH(mem, &pcc->server->hmem, entry) {
1788 		if (mem->alloc_nr == alloc_nr)
1795 	/* verify page offset */
1796 	page_nr %= CUSE_ALLOC_PAGES_MAX;
1797 	if (page_nr >= mem->page_count) {
1801 	/* verify mmap size */
1802 	if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
1803 	    (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
1807 	vm_object_reference(mem->object);
1808 	*object = mem->object;
1811 	/* set new VM object offset to use */
1812 	*offset = page_nr * PAGE_SIZE;
/* EVFILT_READ detach: remove the knote from the server's knlist. */
1819 cuse_client_kqfilter_read_detach(struct knote *kn)
1821 	struct cuse_client *pcc;
1825 	knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/* EVFILT_WRITE detach: remove the knote from the server's knlist. */
1830 cuse_client_kqfilter_write_detach(struct knote *kn)
1832 	struct cuse_client *pcc;
1836 	knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/* EVFILT_READ event: ready iff the NEED_READ flag is set (cuse_mtx held). */
1841 cuse_client_kqfilter_read_event(struct knote *kn, long hint)
1843 	struct cuse_client *pcc;
1845 	mtx_assert(&cuse_mtx, MA_OWNED);
1848 	return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
/* EVFILT_WRITE event: ready iff the NEED_WRITE flag is set (cuse_mtx held). */
1852 cuse_client_kqfilter_write_event(struct knote *kn, long hint)
1854 	struct cuse_client *pcc;
1856 	mtx_assert(&cuse_mtx, MA_OWNED);
1859 	return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
1863 cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
1865 struct cuse_client *pcc;
1866 struct cuse_server *pcs;
1869 error = cuse_client_get(&pcc);
1875 switch (kn->kn_filter) {
1877 pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
1879 kn->kn_fop = &cuse_client_kqfilter_read_ops;
1880 knlist_add(&pcs->selinfo.si_note, kn, 1);
1883 pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
1885 kn->kn_fop = &cuse_client_kqfilter_write_ops;
1886 knlist_add(&pcs->selinfo.si_note, kn, 1);
1895 cuse_client_kqfilter_poll(dev, pcc);