3 * Copyright (c) 2010-2017 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/stdint.h>
28 #include <sys/stddef.h>
29 #include <sys/param.h>
30 #include <sys/types.h>
31 #include <sys/systm.h>
33 #include <sys/kernel.h>
35 #include <sys/linker_set.h>
36 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/condvar.h>
40 #include <sys/sysctl.h>
41 #include <sys/unistd.h>
42 #include <sys/malloc.h>
47 #include <sys/rwlock.h>
48 #include <sys/queue.h>
49 #include <sys/fcntl.h>
51 #include <sys/vnode.h>
52 #include <sys/selinfo.h>
53 #include <sys/ptrace.h>
54 #include <sys/sysent.h>
56 #include <machine/bus.h>
60 #include <vm/vm_object.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_pager.h>
64 #include <fs/cuse/cuse_defs.h>
65 #include <fs/cuse/cuse_ioctl.h>
67 MODULE_VERSION(cuse, 1);
70 * Prevent cuse4bsd.ko and cuse.ko from loading at the same time by
71 * declaring support for the cuse4bsd interface in cuse.ko:
73 MODULE_VERSION(cuse4bsd, 1);
76 FEATURE(cuse, "Userspace character devices");
/*
 * Per-client, per-command-slot state. One instance exists for each of
 * the CUSE_CMD_MAX slots in a cuse_client (see cmds[] below).
 */
struct cuse_client_command {
	TAILQ_ENTRY(cuse_client_command) entry;	/* linkage on the server's pending-command queue */
	struct cuse_command sub;	/* command payload exchanged with the userland server */
	struct thread *entered;		/* server thread currently servicing this slot, or NULL */
	struct cuse_client *client;	/* back pointer to the owning client */
	struct proc *proc_curr;		/* client process blocked waiting for completion, or NULL */
98 TAILQ_ENTRY(cuse_memory) entry;
/*
 * Binds a kernel character device node, created on behalf of the
 * userland server, to that server and its userland device handle.
 */
struct cuse_server_dev {
	TAILQ_ENTRY(cuse_server_dev) entry;	/* linkage on the server's device list (hdev) */
	struct cuse_server *server;	/* owning server */
	struct cdev *kern_dev;		/* kernel cdev node; si_drv1 points back at this struct */
	struct cuse_dev *user_dev;	/* opaque userland-side device identifier */
112 TAILQ_ENTRY(cuse_server) entry;
113 TAILQ_HEAD(, cuse_client_command) head;
114 TAILQ_HEAD(, cuse_server_dev) hdev;
115 TAILQ_HEAD(, cuse_client) hcli;
116 TAILQ_HEAD(, cuse_memory) hmem;
118 struct selinfo selinfo;
125 TAILQ_ENTRY(cuse_client) entry;
126 TAILQ_ENTRY(cuse_client) entry_ref;
127 struct cuse_client_command cmds[CUSE_CMD_MAX];
128 struct cuse_server *server;
129 struct cuse_server_dev *server_dev;
131 uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);
133 int fflags; /* file flags */
134 int cflags; /* client flags */
135 #define CUSE_CLI_IS_CLOSING 0x01
136 #define CUSE_CLI_KNOTE_NEED_READ 0x02
137 #define CUSE_CLI_KNOTE_NEED_WRITE 0x04
138 #define CUSE_CLI_KNOTE_HAS_READ 0x08
139 #define CUSE_CLI_KNOTE_HAS_WRITE 0x10
/* Non-zero when the client has been marked as closing. */
#define CUSE_CLIENT_CLOSING(pcc) \
	((pcc)->cflags & CUSE_CLI_IS_CLOSING)
145 static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
147 static TAILQ_HEAD(, cuse_server) cuse_server_head;
148 static struct mtx cuse_mtx;
149 static struct cdev *cuse_dev;
150 static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
151 static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
153 static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
154 static void cuse_client_kqfilter_read_detach(struct knote *kn);
155 static void cuse_client_kqfilter_write_detach(struct knote *kn);
156 static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
157 static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
159 static struct filterops cuse_client_kqfilter_read_ops = {
161 .f_detach = cuse_client_kqfilter_read_detach,
162 .f_event = cuse_client_kqfilter_read_event,
165 static struct filterops cuse_client_kqfilter_write_ops = {
167 .f_detach = cuse_client_kqfilter_write_detach,
168 .f_event = cuse_client_kqfilter_write_event,
171 static d_open_t cuse_client_open;
172 static d_close_t cuse_client_close;
173 static d_ioctl_t cuse_client_ioctl;
174 static d_read_t cuse_client_read;
175 static d_write_t cuse_client_write;
176 static d_poll_t cuse_client_poll;
177 static d_mmap_single_t cuse_client_mmap_single;
178 static d_kqfilter_t cuse_client_kqfilter;
180 static struct cdevsw cuse_client_devsw = {
181 .d_version = D_VERSION,
182 .d_open = cuse_client_open,
183 .d_close = cuse_client_close,
184 .d_ioctl = cuse_client_ioctl,
185 .d_name = "cuse_client",
186 .d_flags = D_TRACKCLOSE,
187 .d_read = cuse_client_read,
188 .d_write = cuse_client_write,
189 .d_poll = cuse_client_poll,
190 .d_mmap_single = cuse_client_mmap_single,
191 .d_kqfilter = cuse_client_kqfilter,
194 static d_open_t cuse_server_open;
195 static d_close_t cuse_server_close;
196 static d_ioctl_t cuse_server_ioctl;
197 static d_read_t cuse_server_read;
198 static d_write_t cuse_server_write;
199 static d_poll_t cuse_server_poll;
200 static d_mmap_single_t cuse_server_mmap_single;
202 static struct cdevsw cuse_server_devsw = {
203 .d_version = D_VERSION,
204 .d_open = cuse_server_open,
205 .d_close = cuse_server_close,
206 .d_ioctl = cuse_server_ioctl,
207 .d_name = "cuse_server",
208 .d_flags = D_TRACKCLOSE,
209 .d_read = cuse_server_read,
210 .d_write = cuse_server_write,
211 .d_poll = cuse_server_poll,
212 .d_mmap_single = cuse_server_mmap_single,
215 static void cuse_client_is_closing(struct cuse_client *);
216 static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
227 mtx_unlock(&cuse_mtx);
/* Acquire exclusive ownership of a command slot; may sleep (sx lock). */
cuse_cmd_lock(struct cuse_client_command *pccmd)
	sx_xlock(&pccmd->sx);
/* Release a command slot previously taken by cuse_cmd_lock(). */
cuse_cmd_unlock(struct cuse_client_command *pccmd)
	sx_xunlock(&pccmd->sx);
243 cuse_kern_init(void *arg)
245 TAILQ_INIT(&cuse_server_head);
247 mtx_init(&cuse_mtx, "cuse-mtx", NULL, MTX_DEF);
249 cuse_dev = make_dev(&cuse_server_devsw, 0,
250 UID_ROOT, GID_OPERATOR, 0600, "cuse");
252 printf("Cuse v%d.%d.%d @ /dev/cuse\n",
253 (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
254 (CUSE_VERSION >> 0) & 0xFF);
256 SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, NULL);
259 cuse_kern_uninit(void *arg)
265 printf("Cuse: Please exit all /dev/cuse instances "
266 "and processes which have used this device.\n");
268 pause("DRAIN", 2 * hz);
271 ptr = TAILQ_FIRST(&cuse_server_head);
278 if (cuse_dev != NULL)
279 destroy_dev(cuse_dev);
281 mtx_destroy(&cuse_mtx);
283 SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
/*
 * Fetch the per-descriptor server pointer stored via devfs cdevpriv
 * and reject the caller if the server is already shutting down.
 */
cuse_server_get(struct cuse_server **ppcs)
	struct cuse_server *pcs;

	error = devfs_get_cdevpriv((void **)&pcs);
	/* check if closing */
	if (pcs->is_closing) {
309 cuse_server_is_closing(struct cuse_server *pcs)
311 struct cuse_client *pcc;
318 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
319 cuse_client_is_closing(pcc);
/*
 * Find the command slot currently being serviced by the given server
 * thread (slot whose "entered" field matches "td"). Clients that are
 * closing are skipped. Presumably returns NULL when no slot matches
 * -- the function tail is not visible here, verify against caller.
 */
static struct cuse_client_command *
cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
	struct cuse_client *pcc;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		if (CUSE_CLIENT_CLOSING(pcc))
		for (n = 0; n != CUSE_CMD_MAX; n++) {
			if (pcc->cmds[n].entered == td)
				return (&pcc->cmds[n]);
345 cuse_str_filter(char *ptr)
349 while (((c = *ptr) != 0)) {
351 if ((c >= 'a') && (c <= 'z')) {
355 if ((c >= 'A') && (c <= 'Z')) {
359 if ((c >= '0') && (c <= '9')) {
363 if ((c == '.') || (c == '_') || (c == '/')) {
/*
 * Map a CUSE userland error code to the corresponding kernel errno.
 */
cuse_convert_error(int error)
	case CUSE_ERR_WOULDBLOCK:
		return (EWOULDBLOCK);
	case CUSE_ERR_INVALID:
	case CUSE_ERR_NO_MEMORY:
	case CUSE_ERR_SIGNAL:
	case CUSE_ERR_NO_DEVICE:
400 cuse_vm_memory_free(struct cuse_memory *mem)
402 /* last user is gone - free */
403 vm_object_deallocate(mem->object);
405 /* free CUSE memory */
410 cuse_server_alloc_memory(struct cuse_server *pcs, uint32_t alloc_nr,
413 struct cuse_memory *temp;
414 struct cuse_memory *mem;
418 mem = malloc(sizeof(*mem), M_CUSE, M_WAITOK | M_ZERO);
422 object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * page_count,
423 VM_PROT_DEFAULT, 0, curthread->td_ucred);
424 if (object == NULL) {
430 /* check if allocation number already exists */
431 TAILQ_FOREACH(temp, &pcs->hmem, entry) {
432 if (temp->alloc_nr == alloc_nr)
440 mem->object = object;
441 mem->page_count = page_count;
442 mem->alloc_nr = alloc_nr;
443 TAILQ_INSERT_TAIL(&pcs->hmem, mem, entry);
449 vm_object_deallocate(object);
456 cuse_server_free_memory(struct cuse_server *pcs, uint32_t alloc_nr)
458 struct cuse_memory *mem;
461 TAILQ_FOREACH(mem, &pcs->hmem, entry) {
462 if (mem->alloc_nr == alloc_nr)
469 TAILQ_REMOVE(&pcs->hmem, mem, entry);
472 cuse_vm_memory_free(mem);
/*
 * Fetch the per-descriptor client pointer stored via devfs cdevpriv
 * and reject the caller if either the client or its server is closing.
 */
cuse_client_get(struct cuse_client **ppcc)
	struct cuse_client *pcc;

	/* try to get private data */
	error = devfs_get_cdevpriv((void **)&pcc);
	/* check if closing */
	if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
/*
 * Mark a client as closing: detach it from its device node, remove any
 * of its commands still queued on the server (a non-NULL tqe_prev means
 * the entry is on the queue), and wake all threads sleeping on them.
 * Idempotent: returns early if the client is already marked.
 */
cuse_client_is_closing(struct cuse_client *pcc)
	struct cuse_client_command *pccmd;

	if (CUSE_CLIENT_CLOSING(pcc))
	pcc->cflags |= CUSE_CLI_IS_CLOSING;
	pcc->server_dev = NULL;

	for (n = 0; n != CUSE_CMD_MAX; n++) {
		pccmd = &pcc->cmds[n];
		if (pccmd->entry.tqe_prev != NULL) {
			TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
			/* NULL tqe_prev marks the entry as dequeued */
			pccmd->entry.tqe_prev = NULL;
		cv_broadcast(&pccmd->cv);
/*
 * Fill in a command slot (file flags translated to CUSE_FFLAG_*, data
 * pointer and argument) and enqueue it on the server's command queue,
 * unless it is already queued (tqe_prev != NULL) or either side is
 * closing. Caller is expected to hold the relevant lock ("_locked").
 */
cuse_client_send_command_locked(struct cuse_client_command *pccmd,
    uintptr_t data_ptr, unsigned long arg, int fflags, int ioflag)
	unsigned long cuse_fflags = 0;
	struct cuse_server *pcs;

		cuse_fflags |= CUSE_FFLAG_READ;
		cuse_fflags |= CUSE_FFLAG_WRITE;
	if (ioflag & IO_NDELAY)
		cuse_fflags |= CUSE_FFLAG_NONBLOCK;
#if defined(__LP64__)
	/* flag 32-bit clients so the 64-bit server can convert structures */
	if (SV_CURPROC_FLAG(SV_ILP32))
		cuse_fflags |= CUSE_FFLAG_COMPAT32;
	pccmd->sub.fflags = cuse_fflags;
	pccmd->sub.data_pointer = data_ptr;
	pccmd->sub.argument = arg;

	pcs = pccmd->client->server;

	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/*
 * Record that a signal interrupted the waiting client, then enqueue the
 * client's CUSE_CMD_SIGNAL slot so the userland server is notified
 * (unless it is already queued or either side is closing).
 */
cuse_client_got_signal(struct cuse_client_command *pccmd)
	struct cuse_server *pcs;

	pccmd->got_signal = 1;

	/* switch to the dedicated signal-notification slot */
	pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];

	pcs = pccmd->client->server;

	if ((pccmd->entry.tqe_prev == NULL) &&
	    (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
	    (pcs->is_closing == 0)) {
		TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
578 cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
579 uint8_t *arg_ptr, uint32_t arg_len)
585 pccmd->proc_curr = curthread->td_proc;
587 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
588 pccmd->client->server->is_closing) {
589 error = CUSE_ERR_OTHER;
592 while (pccmd->command == CUSE_CMD_NONE) {
594 cv_wait(&pccmd->cv, &cuse_mtx);
596 error = cv_wait_sig(&pccmd->cv, &cuse_mtx);
599 cuse_client_got_signal(pccmd);
601 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
602 pccmd->client->server->is_closing) {
603 error = CUSE_ERR_OTHER;
608 error = pccmd->error;
609 pccmd->command = CUSE_CMD_NONE;
610 cv_signal(&pccmd->cv);
614 /* wait until all process references are gone */
616 pccmd->proc_curr = NULL;
618 while (pccmd->proc_refs != 0)
619 cv_wait(&pccmd->cv, &cuse_mtx);
624 /*------------------------------------------------------------------------*
626 *------------------------------------------------------------------------*/
/*
 * Tear down one server-created character device: detach it so new opens
 * fail (client open checks si_drv1), close all clients using it, then
 * destroy the kernel cdev node synchronously.
 */
cuse_server_free_dev(struct cuse_server_dev *pcsd)
	struct cuse_server *pcs;
	struct cuse_client *pcc;

	/* get server pointer */

	/* prevent creation of more devices */
	if (pcsd->kern_dev != NULL)
		pcsd->kern_dev->si_drv1 = NULL;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		if (pcc->server_dev == pcsd)
			cuse_client_is_closing(pcc);

	/* destroy device, if any */
	if (pcsd->kern_dev != NULL) {
		/* destroy device synchronously */
		destroy_dev(pcsd->kern_dev);
657 cuse_server_unref(struct cuse_server *pcs)
659 struct cuse_server_dev *pcsd;
660 struct cuse_memory *mem;
664 if (pcs->refs != 0) {
668 cuse_server_is_closing(pcs);
669 /* final client wakeup, if any */
670 cuse_server_wakeup_all_client_locked(pcs);
672 TAILQ_REMOVE(&cuse_server_head, pcs, entry);
674 while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
675 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
677 cuse_server_free_dev(pcsd);
681 cuse_free_unit_by_id_locked(pcs, -1);
683 while ((mem = TAILQ_FIRST(&pcs->hmem)) != NULL) {
684 TAILQ_REMOVE(&pcs->hmem, mem, entry);
686 cuse_vm_memory_free(mem);
690 knlist_clear(&pcs->selinfo.si_note, 1);
691 knlist_destroy(&pcs->selinfo.si_note);
695 seldrain(&pcs->selinfo);
697 cv_destroy(&pcs->cv);
703 cuse_server_do_close(struct cuse_server *pcs)
708 cuse_server_is_closing(pcs);
709 /* final client wakeup, if any */
710 cuse_server_wakeup_all_client_locked(pcs);
712 knlist_clear(&pcs->selinfo.si_note, 1);
721 cuse_server_free(void *arg)
723 struct cuse_server *pcs = arg;
726 * The final server unref should be done by the server thread
727 * to prevent deadlock in the client cdevpriv destructor,
728 * which cannot destroy itself.
730 while (cuse_server_do_close(pcs) != 1)
733 /* drop final refcount */
734 cuse_server_unref(pcs);
738 cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
740 struct cuse_server *pcs;
742 pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO);
746 if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
747 printf("Cuse: Cannot set cdevpriv.\n");
751 /* store current process ID */
752 pcs->pid = curproc->p_pid;
754 TAILQ_INIT(&pcs->head);
755 TAILQ_INIT(&pcs->hdev);
756 TAILQ_INIT(&pcs->hcli);
757 TAILQ_INIT(&pcs->hmem);
759 cv_init(&pcs->cv, "cuse-server-cv");
761 knlist_init_mtx(&pcs->selinfo.si_note, &cuse_mtx);
765 TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
772 cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
774 struct cuse_server *pcs;
776 if (cuse_server_get(&pcs) == 0)
777 cuse_server_do_close(pcs);
783 cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
789 cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
795 cuse_server_ioctl_copy_locked(struct cuse_client_command *pccmd,
796 struct cuse_data_chunk *pchk, int isread)
802 offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;
804 if (pchk->length > CUSE_BUFFER_MAX)
807 if (offset >= CUSE_BUFFER_MAX)
810 if ((offset + pchk->length) > CUSE_BUFFER_MAX)
813 p_proc = pccmd->proc_curr;
817 if (pccmd->proc_refs < 0)
826 (void *)pchk->local_ptr,
827 pccmd->client->ioctl_buffer + offset,
831 pccmd->client->ioctl_buffer + offset,
832 (void *)pchk->local_ptr,
840 if (pccmd->proc_curr == NULL)
841 cv_signal(&pccmd->cv);
847 cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
848 struct proc *proc_d, vm_offset_t data_d, size_t len)
851 struct proc *proc_cur;
855 proc_cur = td->td_proc;
857 if (proc_cur == proc_d) {
859 .iov_base = (caddr_t)data_d,
865 .uio_offset = (off_t)data_s,
867 .uio_segflg = UIO_USERSPACE,
873 error = proc_rwmem(proc_s, &uio);
876 } else if (proc_cur == proc_s) {
878 .iov_base = (caddr_t)data_s,
884 .uio_offset = (off_t)data_d,
886 .uio_segflg = UIO_USERSPACE,
892 error = proc_rwmem(proc_d, &uio);
901 cuse_server_data_copy_locked(struct cuse_client_command *pccmd,
902 struct cuse_data_chunk *pchk, int isread)
907 p_proc = pccmd->proc_curr;
911 if (pccmd->proc_refs < 0)
919 error = cuse_proc2proc_copy(
920 curthread->td_proc, pchk->local_ptr,
921 p_proc, pchk->peer_ptr,
924 error = cuse_proc2proc_copy(
925 p_proc, pchk->peer_ptr,
926 curthread->td_proc, pchk->local_ptr,
934 if (pccmd->proc_curr == NULL)
935 cv_signal(&pccmd->cv);
941 cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
948 for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
949 if (cuse_alloc_unit[n] != NULL) {
950 if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
952 if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
961 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
962 if (cuse_alloc_unit[n] == NULL) {
963 cuse_alloc_unit[n] = pcs;
964 cuse_alloc_unit_id[n] = id | x;
/*
 * Wake poll/select sleepers and deliver kqueue events on the server's
 * selinfo. Caller holds the cuse mutex (the knlist is backed by it,
 * hence KNOTE_LOCKED).
 */
cuse_server_wakeup_locked(struct cuse_server *pcs)
	selwakeup(&pcs->selinfo);
	KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
/*
 * Flag every client so both the read and write kevent filters will
 * report ready, then wake all waiters on the server's selinfo.
 */
cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
	struct cuse_client *pcc;

	TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
		pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
		    CUSE_CLI_KNOTE_NEED_WRITE);
	cuse_server_wakeup_locked(pcs);
/*
 * Release unit-number reservations owned by the given server. An id of
 * -1 releases all of the server's units; otherwise only the exactly
 * matching id is released. Returns 0 if at least one entry was freed,
 * EINVAL otherwise.
 */
cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
	for (n = 0; n != CUSE_DEVICES_MAX; n++) {
		if (cuse_alloc_unit[n] == pcs) {
			if (cuse_alloc_unit_id[n] == id || id == -1) {
				cuse_alloc_unit[n] = NULL;
				cuse_alloc_unit_id[n] = 0;
	return (found ? 0 : EINVAL);
1011 cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
1012 caddr_t data, int fflag, struct thread *td)
1014 struct cuse_server *pcs;
1017 error = cuse_server_get(&pcs);
1022 struct cuse_client_command *pccmd;
1023 struct cuse_client *pcc;
1024 struct cuse_command *pcmd;
1025 struct cuse_alloc_info *pai;
1026 struct cuse_create_dev *pcd;
1027 struct cuse_server_dev *pcsd;
1028 struct cuse_data_chunk *pchk;
1031 case CUSE_IOCTL_GET_COMMAND:
1032 pcmd = (void *)data;
1036 while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
1037 error = cv_wait_sig(&pcs->cv, &cuse_mtx);
1039 if (pcs->is_closing)
1048 TAILQ_REMOVE(&pcs->head, pccmd, entry);
1049 pccmd->entry.tqe_prev = NULL;
1051 pccmd->entered = curthread;
1059 case CUSE_IOCTL_SYNC_COMMAND:
1062 while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {
1064 /* send sync command */
1065 pccmd->entered = NULL;
1066 pccmd->error = *(int *)data;
1067 pccmd->command = CUSE_CMD_SYNC;
1069 /* signal peer, if any */
1070 cv_signal(&pccmd->cv);
1076 case CUSE_IOCTL_ALLOC_UNIT:
1079 n = cuse_alloc_unit_by_id_locked(pcs,
1080 CUSE_ID_DEFAULT(0));
1089 case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
1093 n = (n & CUSE_ID_MASK);
1096 n = cuse_alloc_unit_by_id_locked(pcs, n);
1105 case CUSE_IOCTL_FREE_UNIT:
1109 n = CUSE_ID_DEFAULT(n);
1112 error = cuse_free_unit_by_id_locked(pcs, n);
1116 case CUSE_IOCTL_FREE_UNIT_BY_ID:
1121 error = cuse_free_unit_by_id_locked(pcs, n);
1125 case CUSE_IOCTL_ALLOC_MEMORY:
1129 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1133 if (pai->page_count >= CUSE_ALLOC_PAGES_MAX) {
1137 error = cuse_server_alloc_memory(pcs,
1138 pai->alloc_nr, pai->page_count);
1141 case CUSE_IOCTL_FREE_MEMORY:
1144 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1148 error = cuse_server_free_memory(pcs, pai->alloc_nr);
1151 case CUSE_IOCTL_GET_SIG:
1154 pccmd = cuse_server_find_command(pcs, curthread);
1156 if (pccmd != NULL) {
1157 n = pccmd->got_signal;
1158 pccmd->got_signal = 0;
1168 case CUSE_IOCTL_SET_PFH:
1171 pccmd = cuse_server_find_command(pcs, curthread);
1173 if (pccmd != NULL) {
1174 pcc = pccmd->client;
1175 for (n = 0; n != CUSE_CMD_MAX; n++) {
1176 pcc->cmds[n].sub.per_file_handle = *(uintptr_t *)data;
1184 case CUSE_IOCTL_CREATE_DEV:
1186 error = priv_check(curthread, PRIV_DRIVER);
1194 pcd->devname[sizeof(pcd->devname) - 1] = 0;
1196 if (pcd->devname[0] == 0) {
1200 cuse_str_filter(pcd->devname);
1202 pcd->permissions &= 0777;
1204 /* try to allocate a character device */
1206 pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);
1214 pcsd->user_dev = pcd->dev;
1216 pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
1217 &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
1218 pcd->permissions, "%s", pcd->devname);
1220 if (pcsd->kern_dev == NULL) {
1225 pcsd->kern_dev->si_drv1 = pcsd;
1228 TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
1233 case CUSE_IOCTL_DESTROY_DEV:
1235 error = priv_check(curthread, PRIV_DRIVER);
1243 pcsd = TAILQ_FIRST(&pcs->hdev);
1244 while (pcsd != NULL) {
1245 if (pcsd->user_dev == *(struct cuse_dev **)data) {
1246 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
1248 cuse_server_free_dev(pcsd);
1251 pcsd = TAILQ_FIRST(&pcs->hdev);
1253 pcsd = TAILQ_NEXT(pcsd, entry);
1260 case CUSE_IOCTL_WRITE_DATA:
1261 case CUSE_IOCTL_READ_DATA:
1264 pchk = (struct cuse_data_chunk *)data;
1266 pccmd = cuse_server_find_command(pcs, curthread);
1268 if (pccmd == NULL) {
1269 error = ENXIO; /* invalid request */
1270 } else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
1271 error = EFAULT; /* NULL pointer */
1272 } else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
1273 error = cuse_server_ioctl_copy_locked(pccmd,
1274 pchk, cmd == CUSE_IOCTL_READ_DATA);
1276 error = cuse_server_data_copy_locked(pccmd,
1277 pchk, cmd == CUSE_IOCTL_READ_DATA);
1282 case CUSE_IOCTL_SELWAKEUP:
1285 * We don't know which direction caused the event.
1288 cuse_server_wakeup_all_client_locked(pcs);
/*
 * The server device is always considered ready: echo back every
 * requested read/write/error condition.
 */
cuse_server_poll(struct cdev *dev, int events, struct thread *td)
	return (events & (POLLHUP | POLLPRI | POLLIN |
	    POLLRDNORM | POLLOUT | POLLWRNORM));
1307 cuse_server_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1308 vm_size_t size, struct vm_object **object, int nprot)
1310 uint32_t page_nr = *offset / PAGE_SIZE;
1311 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1312 struct cuse_memory *mem;
1313 struct cuse_server *pcs;
1316 error = cuse_server_get(&pcs);
1321 /* lookup memory structure */
1322 TAILQ_FOREACH(mem, &pcs->hmem, entry) {
1323 if (mem->alloc_nr == alloc_nr)
1330 /* verify page offset */
1331 page_nr %= CUSE_ALLOC_PAGES_MAX;
1332 if (page_nr >= mem->page_count) {
1336 /* verify mmap size */
1337 if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
1338 (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
1342 vm_object_reference(mem->object);
1343 *object = mem->object;
1346 /* set new VM object offset to use */
1347 *offset = page_nr * PAGE_SIZE;
1353 /*------------------------------------------------------------------------*
1355 *------------------------------------------------------------------------*/
1357 cuse_client_free(void *arg)
1359 struct cuse_client *pcc = arg;
1360 struct cuse_client_command *pccmd;
1361 struct cuse_server *pcs;
1365 cuse_client_is_closing(pcc);
1366 TAILQ_REMOVE(&pcc->server->hcli, pcc, entry);
1369 for (n = 0; n != CUSE_CMD_MAX; n++) {
1371 pccmd = &pcc->cmds[n];
1373 sx_destroy(&pccmd->sx);
1374 cv_destroy(&pccmd->cv);
1381 /* drop reference on server */
1382 cuse_server_unref(pcs);
1386 cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1388 struct cuse_client_command *pccmd;
1389 struct cuse_server_dev *pcsd;
1390 struct cuse_client *pcc;
1391 struct cuse_server *pcs;
1392 struct cuse_dev *pcd;
1397 pcsd = dev->si_drv1;
1400 pcd = pcsd->user_dev;
1402 * Check that the refcount didn't wrap and that the
1403 * same process is not both client and server. This
1404 * can easily lead to deadlocks when destroying the
1405 * CUSE character device nodes:
1408 if (pcs->refs < 0 || pcs->pid == curproc->p_pid) {
1409 /* overflow or wrong PID */
1422 pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
1424 /* drop reference on server */
1425 cuse_server_unref(pcs);
1428 if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1429 printf("Cuse: Cannot set cdevpriv.\n");
1430 /* drop reference on server */
1431 cuse_server_unref(pcs);
1435 pcc->fflags = fflags;
1436 pcc->server_dev = pcsd;
1439 for (n = 0; n != CUSE_CMD_MAX; n++) {
1441 pccmd = &pcc->cmds[n];
1443 pccmd->sub.dev = pcd;
1444 pccmd->sub.command = n;
1445 pccmd->client = pcc;
1447 sx_init(&pccmd->sx, "cuse-client-sx");
1448 cv_init(&pccmd->cv, "cuse-client-cv");
1453 /* cuse_client_free() assumes that the client is listed somewhere! */
1454 /* always enqueue */
1456 TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1458 /* check if server is closing */
1459 if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1467 devfs_clear_cdevpriv(); /* XXX bugfix */
1470 pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1472 cuse_cmd_lock(pccmd);
1475 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1477 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1481 error = cuse_convert_error(error);
1486 cuse_cmd_unlock(pccmd);
1489 devfs_clear_cdevpriv(); /* XXX bugfix */
1495 cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
1497 struct cuse_client_command *pccmd;
1498 struct cuse_client *pcc;
1501 error = cuse_client_get(&pcc);
1505 pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
1507 cuse_cmd_lock(pccmd);
1510 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1512 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1515 cuse_cmd_unlock(pccmd);
1518 cuse_client_is_closing(pcc);
/*
 * Refresh the client's kevent readiness state: if any knotes are
 * attached (HAS_READ/HAS_WRITE), query the userland server's poll
 * handler and translate the result into NEED_READ/NEED_WRITE flags,
 * then wake the server selinfo so attached knotes get re-evaluated.
 */
cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
	temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
	    CUSE_CLI_KNOTE_HAS_WRITE));
	/* clear stale readiness before re-querying */
	pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
	    CUSE_CLI_KNOTE_NEED_WRITE);

	/* get the latest polling state from the server */
	temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);

	if (temp & (POLLIN | POLLOUT)) {
			pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
			pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;

		/* make sure the "knote" gets woken up */
		cuse_server_wakeup_locked(pcc->server);
1555 cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
1557 struct cuse_client_command *pccmd;
1558 struct cuse_client *pcc;
1562 error = cuse_client_get(&pcc);
1566 pccmd = &pcc->cmds[CUSE_CMD_READ];
1568 if (uio->uio_segflg != UIO_USERSPACE) {
1571 uio->uio_segflg = UIO_NOCOPY;
1573 cuse_cmd_lock(pccmd);
1575 while (uio->uio_resid != 0) {
1577 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1581 len = uio->uio_iov->iov_len;
1584 cuse_client_send_command_locked(pccmd,
1585 (uintptr_t)uio->uio_iov->iov_base,
1586 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1588 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1592 error = cuse_convert_error(error);
1594 } else if (error == len) {
1595 error = uiomove(NULL, error, uio);
1599 error = uiomove(NULL, error, uio);
1603 cuse_cmd_unlock(pccmd);
1605 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1607 if (error == EWOULDBLOCK)
1608 cuse_client_kqfilter_poll(dev, pcc);
1614 cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
1616 struct cuse_client_command *pccmd;
1617 struct cuse_client *pcc;
1621 error = cuse_client_get(&pcc);
1625 pccmd = &pcc->cmds[CUSE_CMD_WRITE];
1627 if (uio->uio_segflg != UIO_USERSPACE) {
1630 uio->uio_segflg = UIO_NOCOPY;
1632 cuse_cmd_lock(pccmd);
1634 while (uio->uio_resid != 0) {
1636 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1640 len = uio->uio_iov->iov_len;
1643 cuse_client_send_command_locked(pccmd,
1644 (uintptr_t)uio->uio_iov->iov_base,
1645 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1647 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1651 error = cuse_convert_error(error);
1653 } else if (error == len) {
1654 error = uiomove(NULL, error, uio);
1658 error = uiomove(NULL, error, uio);
1662 cuse_cmd_unlock(pccmd);
1664 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1666 if (error == EWOULDBLOCK)
1667 cuse_client_kqfilter_poll(dev, pcc);
1673 cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
1674 caddr_t data, int fflag, struct thread *td)
1676 struct cuse_client_command *pccmd;
1677 struct cuse_client *pcc;
1681 error = cuse_client_get(&pcc);
1685 len = IOCPARM_LEN(cmd);
1686 if (len > CUSE_BUFFER_MAX)
1689 pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
1691 cuse_cmd_lock(pccmd);
1693 if (cmd & (IOC_IN | IOC_VOID))
1694 memcpy(pcc->ioctl_buffer, data, len);
1697 * When the ioctl-length is zero drivers can pass information
1698 * through the data pointer of the ioctl. Make sure this information
1699 * is forwarded to the driver.
1703 cuse_client_send_command_locked(pccmd,
1704 (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
1705 (unsigned long)cmd, pcc->fflags,
1706 (fflag & O_NONBLOCK) ? IO_NDELAY : 0);
1708 error = cuse_client_receive_command_locked(pccmd, data, len);
1712 error = cuse_convert_error(error);
1718 memcpy(data, pcc->ioctl_buffer, len);
1720 cuse_cmd_unlock(pccmd);
1722 if (error == EWOULDBLOCK)
1723 cuse_client_kqfilter_poll(dev, pcc);
1729 cuse_client_poll(struct cdev *dev, int events, struct thread *td)
1731 struct cuse_client_command *pccmd;
1732 struct cuse_client *pcc;
1737 error = cuse_client_get(&pcc);
1743 if (events & (POLLPRI | POLLIN | POLLRDNORM))
1744 temp |= CUSE_POLL_READ;
1746 if (events & (POLLOUT | POLLWRNORM))
1747 temp |= CUSE_POLL_WRITE;
1749 if (events & POLLHUP)
1750 temp |= CUSE_POLL_ERROR;
1752 pccmd = &pcc->cmds[CUSE_CMD_POLL];
1754 cuse_cmd_lock(pccmd);
1756 /* Need to selrecord() first to not loose any events. */
1757 if (temp != 0 && td != NULL)
1758 selrecord(td, &pcc->server->selinfo);
1761 cuse_client_send_command_locked(pccmd,
1762 0, temp, pcc->fflags, IO_NDELAY);
1764 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1767 cuse_cmd_unlock(pccmd);
1773 if (error & CUSE_POLL_READ)
1774 revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
1775 if (error & CUSE_POLL_WRITE)
1776 revents |= (events & (POLLOUT | POLLWRNORM));
1777 if (error & CUSE_POLL_ERROR)
1778 revents |= (events & POLLHUP);
1783 /* XXX many clients don't understand POLLNVAL */
1784 return (events & (POLLHUP | POLLPRI | POLLIN |
1785 POLLRDNORM | POLLOUT | POLLWRNORM));
1789 cuse_client_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1790 vm_size_t size, struct vm_object **object, int nprot)
1792 uint32_t page_nr = *offset / PAGE_SIZE;
1793 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1794 struct cuse_memory *mem;
1795 struct cuse_client *pcc;
1798 error = cuse_client_get(&pcc);
1803 /* lookup memory structure */
1804 TAILQ_FOREACH(mem, &pcc->server->hmem, entry) {
1805 if (mem->alloc_nr == alloc_nr)
1812 /* verify page offset */
1813 page_nr %= CUSE_ALLOC_PAGES_MAX;
1814 if (page_nr >= mem->page_count) {
1818 /* verify mmap size */
1819 if ((size % PAGE_SIZE) != 0 || (size < PAGE_SIZE) ||
1820 (size > ((mem->page_count - page_nr) * PAGE_SIZE))) {
1824 vm_object_reference(mem->object);
1825 *object = mem->object;
1828 /* set new VM object offset to use */
1829 *offset = page_nr * PAGE_SIZE;
/* Detach a read knote from the server's selinfo knlist. */
cuse_client_kqfilter_read_detach(struct knote *kn)
	struct cuse_client *pcc;

	knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/* Detach a write knote from the server's selinfo knlist. */
cuse_client_kqfilter_write_detach(struct knote *kn)
	struct cuse_client *pcc;

	knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
/*
 * kevent read filter: report readiness previously recorded by
 * cuse_client_kqfilter_poll() in the client's NEED_READ flag.
 */
cuse_client_kqfilter_read_event(struct knote *kn, long hint)
	struct cuse_client *pcc;

	mtx_assert(&cuse_mtx, MA_OWNED);
	return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
/*
 * kevent write filter: report readiness previously recorded by
 * cuse_client_kqfilter_poll() in the client's NEED_WRITE flag.
 */
cuse_client_kqfilter_write_event(struct knote *kn, long hint)
	struct cuse_client *pcc;

	mtx_assert(&cuse_mtx, MA_OWNED);
	return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
1880 cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
1882 struct cuse_client *pcc;
1883 struct cuse_server *pcs;
1886 error = cuse_client_get(&pcc);
1892 switch (kn->kn_filter) {
1894 pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
1896 kn->kn_fop = &cuse_client_kqfilter_read_ops;
1897 knlist_add(&pcs->selinfo.si_note, kn, 1);
1900 pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
1902 kn->kn_fop = &cuse_client_kqfilter_write_ops;
1903 knlist_add(&pcs->selinfo.si_note, kn, 1);
1912 cuse_client_kqfilter_poll(dev, pcc);