3 * Copyright (c) 2010-2013 Hans Petter Selasky. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include "opt_compat.h"
29 #include <sys/stdint.h>
30 #include <sys/stddef.h>
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/systm.h>
35 #include <sys/kernel.h>
37 #include <sys/linker_set.h>
38 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/condvar.h>
42 #include <sys/sysctl.h>
43 #include <sys/unistd.h>
44 #include <sys/malloc.h>
49 #include <sys/queue.h>
50 #include <sys/fcntl.h>
52 #include <sys/vnode.h>
53 #include <sys/selinfo.h>
54 #include <sys/ptrace.h>
56 #include <machine/bus.h>
61 #include <fs/cuse/cuse_defs.h>
62 #include <fs/cuse/cuse_ioctl.h>
/*
 * NOTE(review): this chunk is a lossy extraction — the original file's
 * line numbers are fused onto each line and many lines are missing.
 * Code below is kept byte-identical; comments are hedged accordingly.
 */
/* Module identity and kernel feature advertisement. */
64 MODULE_VERSION(cuse, 1);
/* Sentinel "busy" marker stored in cuse_memory.virtaddr while an
 * allocation is in progress (distinguishes from NULL and real memory). */
66 #define NBUSY ((uint8_t *)1)
69 FEATURE(cuse, "Userspace character devices");
/* One in-flight command slot between a client and its userspace server. */
76 struct cuse_client_command {
77 TAILQ_ENTRY(cuse_client_command) entry;
78 struct cuse_command sub;
81 struct thread *entered;
82 struct cuse_client *client;
83 struct proc *proc_curr;
/* NOTE(review): fields below appear to belong to a different struct
 * (presumably struct cuse_memory) whose opening line was lost. */
91 struct cuse_server *owner;
94 uint32_t is_allocated;
/* Binding between a server and one character device it created. */
97 struct cuse_server_dev {
98 TAILQ_ENTRY(cuse_server_dev) entry;
99 struct cuse_server *server;
100 struct cdev *kern_dev;
101 struct cuse_dev *user_dev;
/* NOTE(review): fields below are the interior of struct cuse_server;
 * its opening line is missing from this extraction. */
105 TAILQ_ENTRY(cuse_server) entry;
106 TAILQ_HEAD(, cuse_client_command) head;
107 TAILQ_HEAD(, cuse_server_dev) hdev;
108 TAILQ_HEAD(, cuse_client) hcli;
110 struct selinfo selinfo;
/* NOTE(review): fields below are the interior of struct cuse_client. */
116 TAILQ_ENTRY(cuse_client) entry;
117 TAILQ_ENTRY(cuse_client) entry_ref;
118 struct cuse_client_command cmds[CUSE_CMD_MAX];
119 struct cuse_server *server;
120 struct cuse_server_dev *server_dev;
122 uint8_t ioctl_buffer[CUSE_BUFFER_MAX] __aligned(4);
124 int fflags; /* file flags */
125 int cflags; /* client flags */
/* cflags bits: closing state plus kqueue bookkeeping. */
126 #define CUSE_CLI_IS_CLOSING 0x01
127 #define CUSE_CLI_KNOTE_NEED_READ 0x02
128 #define CUSE_CLI_KNOTE_NEED_WRITE 0x04
129 #define CUSE_CLI_KNOTE_HAS_READ 0x08
130 #define CUSE_CLI_KNOTE_HAS_WRITE 0x10
/* True when the client has started tear-down. */
133 #define CUSE_CLIENT_CLOSING(pcc) \
134 ((pcc)->cflags & CUSE_CLI_IS_CLOSING)
136 static MALLOC_DEFINE(M_CUSE, "cuse", "CUSE memory");
/* Global module state, protected by cuse_mtx. */
138 static TAILQ_HEAD(, cuse_server) cuse_server_head;
139 static struct mtx cuse_mtx;
140 static struct cdev *cuse_dev;
141 static struct cuse_server *cuse_alloc_unit[CUSE_DEVICES_MAX];
142 static int cuse_alloc_unit_id[CUSE_DEVICES_MAX];
143 static struct cuse_memory cuse_mem[CUSE_ALLOC_UNIT_MAX];
/* Forward declarations. */
145 static void cuse_server_wakeup_all_client_locked(struct cuse_server *pcs);
146 static void cuse_client_kqfilter_read_detach(struct knote *kn);
147 static void cuse_client_kqfilter_write_detach(struct knote *kn);
148 static int cuse_client_kqfilter_read_event(struct knote *kn, long hint);
149 static int cuse_client_kqfilter_write_event(struct knote *kn, long hint);
/* kqueue filter operations for EVFILT_READ / EVFILT_WRITE on clients. */
151 static struct filterops cuse_client_kqfilter_read_ops = {
153 .f_detach = cuse_client_kqfilter_read_detach,
154 .f_event = cuse_client_kqfilter_read_event,
157 static struct filterops cuse_client_kqfilter_write_ops = {
159 .f_detach = cuse_client_kqfilter_write_detach,
160 .f_event = cuse_client_kqfilter_write_event,
/* cdevsw entry points for per-device client nodes. */
163 static d_open_t cuse_client_open;
164 static d_close_t cuse_client_close;
165 static d_ioctl_t cuse_client_ioctl;
166 static d_read_t cuse_client_read;
167 static d_write_t cuse_client_write;
168 static d_poll_t cuse_client_poll;
169 static d_mmap_t cuse_client_mmap;
170 static d_kqfilter_t cuse_client_kqfilter;
172 static struct cdevsw cuse_client_devsw = {
173 .d_version = D_VERSION,
174 .d_open = cuse_client_open,
175 .d_close = cuse_client_close,
176 .d_ioctl = cuse_client_ioctl,
177 .d_name = "cuse_client",
178 .d_flags = D_TRACKCLOSE,
179 .d_read = cuse_client_read,
180 .d_write = cuse_client_write,
181 .d_poll = cuse_client_poll,
182 .d_mmap = cuse_client_mmap,
183 .d_kqfilter = cuse_client_kqfilter,
/* cdevsw entry points for the /dev/cuse server control node. */
186 static d_open_t cuse_server_open;
187 static d_close_t cuse_server_close;
188 static d_ioctl_t cuse_server_ioctl;
189 static d_read_t cuse_server_read;
190 static d_write_t cuse_server_write;
191 static d_poll_t cuse_server_poll;
192 static d_mmap_t cuse_server_mmap;
194 static struct cdevsw cuse_server_devsw = {
195 .d_version = D_VERSION,
196 .d_open = cuse_server_open,
197 .d_close = cuse_server_close,
198 .d_ioctl = cuse_server_ioctl,
199 .d_name = "cuse_server",
200 .d_flags = D_TRACKCLOSE,
201 .d_read = cuse_server_read,
202 .d_write = cuse_server_write,
203 .d_poll = cuse_server_poll,
204 .d_mmap = cuse_server_mmap,
207 static void cuse_client_is_closing(struct cuse_client *);
208 static int cuse_free_unit_by_id_locked(struct cuse_server *, int);
/* NOTE(review): line below is a fragment of the cuse_unlock() helper
 * (a small wrapper around cuse_mtx); its surrounding lines were lost. */
219 mtx_unlock(&cuse_mtx);
223 cuse_cmd_lock(struct cuse_client_command *pccmd)
225 sx_xlock(&pccmd->sx);
229 cuse_cmd_unlock(struct cuse_client_command *pccmd)
231 sx_xunlock(&pccmd->sx);
235 cuse_kern_init(void *arg)
237 TAILQ_INIT(&cuse_server_head);
239 mtx_init(&cuse_mtx, "cuse-mtx", NULL, MTX_DEF);
241 cuse_dev = make_dev(&cuse_server_devsw, 0,
242 UID_ROOT, GID_OPERATOR, 0600, "cuse");
244 printf("Cuse v%d.%d.%d @ /dev/cuse\n",
245 (CUSE_VERSION >> 16) & 0xFF, (CUSE_VERSION >> 8) & 0xFF,
246 (CUSE_VERSION >> 0) & 0xFF);
249 SYSINIT(cuse_kern_init, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_init, 0);
252 cuse_kern_uninit(void *arg)
258 printf("Cuse: Please exit all /dev/cuse instances "
259 "and processes which have used this device.\n");
261 pause("DRAIN", 2 * hz);
264 ptr = TAILQ_FIRST(&cuse_server_head);
271 if (cuse_dev != NULL)
272 destroy_dev(cuse_dev);
274 mtx_destroy(&cuse_mtx);
277 SYSUNINIT(cuse_kern_uninit, SI_SUB_DEVFS, SI_ORDER_ANY, cuse_kern_uninit, 0);
/* Fetch the server attached to the caller's /dev/cuse file handle via
 * cdevpriv; presumably fails once the server is closing. */
280 cuse_server_get(struct cuse_server **ppcs)
282 struct cuse_server *pcs;
285 error = devfs_get_cdevpriv((void **)&pcs);
290 /* check if closing */
292 if (pcs->is_closing) {
/* Mark a server as closing and propagate the shutdown to all of its
 * clients. */
303 cuse_server_is_closing(struct cuse_server *pcs)
305 struct cuse_client *pcc;
312 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
313 cuse_client_is_closing(pcc);
/* Find the command slot currently "entered" by the given server thread
 * (set by CUSE_IOCTL_GET_COMMAND); NULL if none. */
317 static struct cuse_client_command *
318 cuse_server_find_command(struct cuse_server *pcs, struct thread *td)
320 struct cuse_client *pcc;
326 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
327 if (CUSE_CLIENT_CLOSING(pcc))
329 for (n = 0; n != CUSE_CMD_MAX; n++) {
330 if (pcc->cmds[n].entered == td)
331 return (&pcc->cmds[n]);
/* Sanitize a requested device name in place: alnum plus '.', '_', '/'
 * pass through; other characters are presumably replaced or dropped
 * (replacement lines missing from this extraction). */
339 cuse_str_filter(char *ptr)
343 while (((c = *ptr) != 0)) {
345 if ((c >= 'a') && (c <= 'z')) {
349 if ((c >= 'A') && (c <= 'Z')) {
353 if ((c >= '0') && (c <= '9')) {
357 if ((c == '.') || (c == '_') || (c == '/')) {
/* Map CUSE_ERR_* codes from the userspace server to kernel errno values. */
368 cuse_convert_error(int error)
376 case CUSE_ERR_WOULDBLOCK:
377 return (EWOULDBLOCK);
378 case CUSE_ERR_INVALID:
380 case CUSE_ERR_NO_MEMORY:
384 case CUSE_ERR_SIGNAL:
/* Release ownership of all shared-memory slots held by this server.
 * Note the comment below: the backing memory itself is kept. */
392 cuse_server_free_memory(struct cuse_server *pcs)
394 struct cuse_memory *mem;
397 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
400 /* this memory is never freed */
401 if (mem->owner == pcs) {
403 mem->is_allocated = 0;
/* Allocate (or re-own) one shared-memory slot of page_count pages.
 * virtaddr is set to NBUSY while the malloc is in flight so the global
 * lock can be dropped safely. */
409 cuse_server_alloc_memory(struct cuse_server *pcs,
410 struct cuse_memory *mem, uint32_t page_count)
417 if (mem->virtaddr == NBUSY) {
421 if (mem->virtaddr != NULL) {
422 if (mem->is_allocated != 0) {
426 if (mem->page_count == page_count) {
427 mem->is_allocated = 1;
435 memset(mem, 0, sizeof(*mem));
437 mem->virtaddr = NBUSY;
441 ptr = malloc(page_count * PAGE_SIZE, M_CUSE, M_WAITOK | M_ZERO);
450 mem->virtaddr = NULL;
455 mem->page_count = page_count;
456 mem->is_allocated = 1;
/* Fetch the client attached to the caller's file handle; fails when
 * either the client or its server is closing. */
464 cuse_client_get(struct cuse_client **ppcc)
466 struct cuse_client *pcc;
469 /* try to get private data */
470 error = devfs_get_cdevpriv((void **)&pcc);
475 /* check if closing */
477 if (CUSE_CLIENT_CLOSING(pcc) || pcc->server->is_closing) {
/* Mark a client closing: dequeue any pending commands from the server
 * queue and wake all waiters. */
488 cuse_client_is_closing(struct cuse_client *pcc)
490 struct cuse_client_command *pccmd;
493 if (CUSE_CLIENT_CLOSING(pcc))
496 pcc->cflags |= CUSE_CLI_IS_CLOSING;
497 pcc->server_dev = NULL;
499 for (n = 0; n != CUSE_CMD_MAX; n++) {
501 pccmd = &pcc->cmds[n];
/* tqe_prev != NULL means the command is currently queued */
503 if (pccmd->entry.tqe_prev != NULL) {
504 TAILQ_REMOVE(&pcc->server->head, pccmd, entry);
505 pccmd->entry.tqe_prev = NULL;
507 cv_broadcast(&pccmd->cv);
/* Fill in a command slot (translating kernel fflags/ioflag to
 * CUSE_FFLAG_*) and queue it for the server, unless tear-down started. */
512 cuse_client_send_command_locked(struct cuse_client_command *pccmd,
513 unsigned long data_ptr, unsigned long arg, int fflags, int ioflag)
515 unsigned long cuse_fflags = 0;
516 struct cuse_server *pcs;
519 cuse_fflags |= CUSE_FFLAG_READ;
522 cuse_fflags |= CUSE_FFLAG_WRITE;
524 if (ioflag & IO_NDELAY)
525 cuse_fflags |= CUSE_FFLAG_NONBLOCK;
527 pccmd->sub.fflags = cuse_fflags;
528 pccmd->sub.data_pointer = data_ptr;
529 pccmd->sub.argument = arg;
531 pcs = pccmd->client->server;
533 if ((pccmd->entry.tqe_prev == NULL) &&
534 (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
535 (pcs->is_closing == 0)) {
536 TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/* Record a signal on a command and queue the client's dedicated
 * CUSE_CMD_SIGNAL slot so the server learns about the interruption. */
542 cuse_client_got_signal(struct cuse_client_command *pccmd)
544 struct cuse_server *pcs;
546 pccmd->got_signal = 1;
548 pccmd = &pccmd->client->cmds[CUSE_CMD_SIGNAL];
550 pcs = pccmd->client->server;
552 if ((pccmd->entry.tqe_prev == NULL) &&
553 (CUSE_CLIENT_CLOSING(pccmd->client) == 0) &&
554 (pcs->is_closing == 0)) {
555 TAILQ_INSERT_TAIL(&pcs->head, pccmd, entry);
/* Wait for the server to complete a previously queued command; handles
 * signals, server tear-down, and finally drains proc references so the
 * server cannot touch this thread's address space afterwards. */
561 cuse_client_receive_command_locked(struct cuse_client_command *pccmd,
562 uint8_t *arg_ptr, uint32_t arg_len)
568 pccmd->proc_curr = curthread->td_proc;
570 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
571 pccmd->client->server->is_closing) {
572 error = CUSE_ERR_OTHER;
575 while (pccmd->command == CUSE_CMD_NONE) {
577 cv_wait(&pccmd->cv, &cuse_mtx);
579 error = cv_wait_sig(&pccmd->cv, &cuse_mtx);
582 cuse_client_got_signal(pccmd);
584 if (CUSE_CLIENT_CLOSING(pccmd->client) ||
585 pccmd->client->server->is_closing) {
586 error = CUSE_ERR_OTHER;
591 error = pccmd->error;
592 pccmd->command = CUSE_CMD_NONE;
593 cv_signal(&pccmd->cv);
597 /* wait until all process references are gone */
599 pccmd->proc_curr = NULL;
601 while (pccmd->proc_refs != 0)
602 cv_wait(&pccmd->cv, &cuse_mtx);
607 /*------------------------------------------------------------------------*
609 *------------------------------------------------------------------------*/
/* Tear down one server-created character device: detach si_drv1 so no
 * new clients can bind, close existing clients, destroy the cdev. */
612 cuse_server_free_dev(struct cuse_server_dev *pcsd)
614 struct cuse_server *pcs;
615 struct cuse_client *pcc;
617 /* get server pointer */
620 /* prevent creation of more devices */
622 if (pcsd->kern_dev != NULL)
623 pcsd->kern_dev->si_drv1 = NULL;
625 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
626 if (pcc->server_dev == pcsd)
627 cuse_client_is_closing(pcc);
631 /* destroy device, if any */
632 if (pcsd->kern_dev != NULL) {
633 /* destroy device synchronously */
634 destroy_dev(pcsd->kern_dev);
/* cdevpriv destructor / reference-drop for a server: when the last
 * reference goes, unlink it, free units, devices, memory and kqueue
 * state. */
640 cuse_server_free(void *arg)
642 struct cuse_server *pcs = arg;
643 struct cuse_server_dev *pcsd;
647 if (pcs->refs != 0) {
651 cuse_server_is_closing(pcs);
652 /* final client wakeup, if any */
653 cuse_server_wakeup_all_client_locked(pcs);
655 TAILQ_REMOVE(&cuse_server_head, pcs, entry);
657 cuse_free_unit_by_id_locked(pcs, -1);
659 while ((pcsd = TAILQ_FIRST(&pcs->hdev)) != NULL) {
660 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
662 cuse_server_free_dev(pcsd);
666 cuse_server_free_memory(pcs);
668 knlist_clear(&pcs->selinfo.si_note, 1);
669 knlist_destroy(&pcs->selinfo.si_note);
673 seldrain(&pcs->selinfo);
675 cv_destroy(&pcs->cv);
/* Open of /dev/cuse: allocate and register a new server instance. */
681 cuse_server_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
683 struct cuse_server *pcs;
685 pcs = malloc(sizeof(*pcs), M_CUSE, M_WAITOK | M_ZERO)
689 if (devfs_set_cdevpriv(pcs, &cuse_server_free)) {
690 printf("Cuse: Cannot set cdevpriv.\n");
694 TAILQ_INIT(&pcs->head);
695 TAILQ_INIT(&pcs->hdev);
696 TAILQ_INIT(&pcs->hcli);
698 cv_init(&pcs->cv, "cuse-server-cv");
700 knlist_init_mtx(&pcs->selinfo.si_note, &cuse_mtx);
704 TAILQ_INSERT_TAIL(&cuse_server_head, pcs, entry);
/* Close of /dev/cuse: begin shutdown; the final free happens via the
 * cdevpriv destructor once all references drop. */
711 cuse_server_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
713 struct cuse_server *pcs;
716 error = cuse_server_get(&pcs);
721 cuse_server_is_closing(pcs);
722 /* final client wakeup, if any */
723 cuse_server_wakeup_all_client_locked(pcs);
725 knlist_clear(&pcs->selinfo.si_note, 1);
/* read/write on the control node are presumably stubs (bodies missing
 * from this extraction). */
733 cuse_server_read(struct cdev *dev, struct uio *uio, int ioflag)
739 cuse_server_write(struct cdev *dev, struct uio *uio, int ioflag)
/* Copy between the server process and a client's in-kernel ioctl
 * buffer, with careful bounds checks on the peer pointer/length. */
745 cuse_server_ioctl_copy_locked(struct cuse_client_command *pccmd,
746 struct cuse_data_chunk *pchk, int isread)
752 offset = pchk->peer_ptr - CUSE_BUF_MIN_PTR;
754 if (pchk->length > CUSE_BUFFER_MAX)
757 if (offset >= CUSE_BUFFER_MAX)
760 if ((offset + pchk->length) > CUSE_BUFFER_MAX)
763 p_proc = pccmd->proc_curr;
767 if (pccmd->proc_refs < 0)
776 (void *)pchk->local_ptr,
777 pccmd->client->ioctl_buffer + offset,
781 pccmd->client->ioctl_buffer + offset,
782 (void *)pchk->local_ptr,
790 if (pccmd->proc_curr == NULL)
791 cv_signal(&pccmd->cv);
/* Copy len bytes between two processes' address spaces using
 * proc_rwmem(); one side must be the current process. */
797 cuse_proc2proc_copy(struct proc *proc_s, vm_offset_t data_s,
798 struct proc *proc_d, vm_offset_t data_d, size_t len)
801 struct proc *proc_cur;
805 proc_cur = td->td_proc;
807 if (proc_cur == proc_d) {
809 .iov_base = (caddr_t)data_d,
815 .uio_offset = (off_t)data_s,
817 .uio_segflg = UIO_USERSPACE,
823 error = proc_rwmem(proc_s, &uio);
826 } else if (proc_cur == proc_s) {
828 .iov_base = (caddr_t)data_s,
834 .uio_offset = (off_t)data_d,
836 .uio_segflg = UIO_USERSPACE,
842 error = proc_rwmem(proc_d, &uio);
/* Copy between the server process and the peer client process for
 * read/write data transfers, holding a proc reference meanwhile. */
851 cuse_server_data_copy_locked(struct cuse_client_command *pccmd,
852 struct cuse_data_chunk *pchk, int isread)
857 p_proc = pccmd->proc_curr;
861 if (pccmd->proc_refs < 0)
869 error = cuse_proc2proc_copy(
870 curthread->td_proc, pchk->local_ptr,
871 p_proc, pchk->peer_ptr,
874 error = cuse_proc2proc_copy(
875 p_proc, pchk->peer_ptr,
876 curthread->td_proc, pchk->local_ptr,
884 if (pccmd->proc_curr == NULL)
885 cv_signal(&pccmd->cv);
/* Reserve a device unit number within the CUSE_ID_MASK namespace of
 * "id"; scans for collisions, then claims the first free table slot. */
891 cuse_alloc_unit_by_id_locked(struct cuse_server *pcs, int id)
898 for (match = n = 0; n != CUSE_DEVICES_MAX; n++) {
899 if (cuse_alloc_unit[n] != NULL) {
900 if ((cuse_alloc_unit_id[n] ^ id) & CUSE_ID_MASK)
902 if ((cuse_alloc_unit_id[n] & ~CUSE_ID_MASK) == x) {
911 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
912 if (cuse_alloc_unit[n] == NULL) {
913 cuse_alloc_unit[n] = pcs;
914 cuse_alloc_unit_id[n] = id | x;
/* Wake select() waiters and kqueue notes attached to the server. */
923 cuse_server_wakeup_locked(struct cuse_server *pcs)
925 selwakeup(&pcs->selinfo);
926 KNOTE_LOCKED(&pcs->selinfo.si_note, 0);
/* Mark every client as possibly readable/writable and wake the server's
 * selinfo; used when the direction of an event is unknown. */
930 cuse_server_wakeup_all_client_locked(struct cuse_server *pcs)
932 struct cuse_client *pcc;
934 TAILQ_FOREACH(pcc, &pcs->hcli, entry) {
935 pcc->cflags |= (CUSE_CLI_KNOTE_NEED_READ |
936 CUSE_CLI_KNOTE_NEED_WRITE);
938 cuse_server_wakeup_locked(pcs);
/* Release unit numbers owned by pcs; id == -1 releases all of them.
 * Returns EINVAL when nothing matched. */
942 cuse_free_unit_by_id_locked(struct cuse_server *pcs, int id)
947 for (n = 0; n != CUSE_DEVICES_MAX; n++) {
948 if (cuse_alloc_unit[n] == pcs) {
949 if (cuse_alloc_unit_id[n] == id || id == -1) {
950 cuse_alloc_unit[n] = NULL;
951 cuse_alloc_unit_id[n] = 0;
957 return (found ? 0 : EINVAL);
/* Control-node ioctl dispatcher: this is the whole userspace-server
 * protocol (fetch/complete commands, unit and memory management, device
 * creation/destruction, data transfer, wakeups). */
961 cuse_server_ioctl(struct cdev *dev, unsigned long cmd,
962 caddr_t data, int fflag, struct thread *td)
964 struct cuse_server *pcs;
967 error = cuse_server_get(&pcs);
972 struct cuse_client_command *pccmd;
973 struct cuse_client *pcc;
974 struct cuse_command *pcmd;
975 struct cuse_alloc_info *pai;
976 struct cuse_create_dev *pcd;
977 struct cuse_server_dev *pcsd;
978 struct cuse_data_chunk *pchk;
/* Block until a client queues a command, then hand it to this server
 * thread (recorded in pccmd->entered). */
981 case CUSE_IOCTL_GET_COMMAND:
986 while ((pccmd = TAILQ_FIRST(&pcs->head)) == NULL) {
987 error = cv_wait_sig(&pcs->cv, &cuse_mtx);
998 TAILQ_REMOVE(&pcs->head, pccmd, entry);
999 pccmd->entry.tqe_prev = NULL;
1001 pccmd->entered = curthread;
/* Complete the command this thread previously fetched; the result code
 * is passed back through *data and the waiting client is signalled. */
1009 case CUSE_IOCTL_SYNC_COMMAND:
1012 while ((pccmd = cuse_server_find_command(pcs, curthread)) != NULL) {
1014 /* send sync command */
1015 pccmd->entered = NULL;
1016 pccmd->error = *(int *)data;
1017 pccmd->command = CUSE_CMD_SYNC;
1019 /* signal peer, if any */
1020 cv_signal(&pccmd->cv);
/* Unit number allocation/free, with and without an explicit id. */
1026 case CUSE_IOCTL_ALLOC_UNIT:
1029 n = cuse_alloc_unit_by_id_locked(pcs,
1030 CUSE_ID_DEFAULT(0));
1039 case CUSE_IOCTL_ALLOC_UNIT_BY_ID:
1043 n = (n & CUSE_ID_MASK);
1046 n = cuse_alloc_unit_by_id_locked(pcs, n);
1055 case CUSE_IOCTL_FREE_UNIT:
1059 n = CUSE_ID_DEFAULT(n);
1062 error = cuse_free_unit_by_id_locked(pcs, n);
1066 case CUSE_IOCTL_FREE_UNIT_BY_ID:
1071 error = cuse_free_unit_by_id_locked(pcs, n);
/* Shared-memory slot management for mmap-based transfers. */
1075 case CUSE_IOCTL_ALLOC_MEMORY:
1079 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1083 if (pai->page_count > CUSE_ALLOC_PAGES_MAX) {
1087 error = cuse_server_alloc_memory(pcs,
1088 &cuse_mem[pai->alloc_nr], pai->page_count);
1091 case CUSE_IOCTL_FREE_MEMORY:
1094 if (pai->alloc_nr >= CUSE_ALLOC_UNIT_MAX) {
1098 /* we trust the character device driver in this case */
1101 if (cuse_mem[pai->alloc_nr].owner == pcs) {
1102 cuse_mem[pai->alloc_nr].is_allocated = 0;
1103 cuse_mem[pai->alloc_nr].owner = NULL;
/* Query-and-clear the signal flag of the command this thread holds. */
1110 case CUSE_IOCTL_GET_SIG:
1113 pccmd = cuse_server_find_command(pcs, curthread);
1115 if (pccmd != NULL) {
1116 n = pccmd->got_signal;
1117 pccmd->got_signal = 0;
/* Attach a per-file handle to every command slot of the peer client. */
1127 case CUSE_IOCTL_SET_PFH:
1130 pccmd = cuse_server_find_command(pcs, curthread);
1132 if (pccmd != NULL) {
1133 pcc = pccmd->client;
1134 for (n = 0; n != CUSE_CMD_MAX; n++) {
1135 pcc->cmds[n].sub.per_file_handle = *(unsigned long *)data;
/* Create a character device on behalf of the server; requires
 * PRIV_DRIVER, sanitizes the name and clamps permissions to 0777. */
1143 case CUSE_IOCTL_CREATE_DEV:
1145 error = priv_check(curthread, PRIV_DRIVER);
1153 pcd->devname[sizeof(pcd->devname) - 1] = 0;
1155 if (pcd->devname[0] == 0) {
1159 cuse_str_filter(pcd->devname);
1161 pcd->permissions &= 0777;
1163 /* try to allocate a character device */
1165 pcsd = malloc(sizeof(*pcsd), M_CUSE, M_WAITOK | M_ZERO);
1173 pcsd->user_dev = pcd->dev;
1175 pcsd->kern_dev = make_dev_credf(MAKEDEV_CHECKNAME,
1176 &cuse_client_devsw, 0, NULL, pcd->user_id, pcd->group_id,
1177 pcd->permissions, "%s", pcd->devname);
1179 if (pcsd->kern_dev == NULL) {
1184 pcsd->kern_dev->si_drv1 = pcsd;
1187 TAILQ_INSERT_TAIL(&pcs->hdev, pcsd, entry);
/* Destroy a previously created device; restarts the scan after each
 * removal since the list may change while unlocked. */
1192 case CUSE_IOCTL_DESTROY_DEV:
1194 error = priv_check(curthread, PRIV_DRIVER);
1202 pcsd = TAILQ_FIRST(&pcs->hdev);
1203 while (pcsd != NULL) {
1204 if (pcsd->user_dev == *(struct cuse_dev **)data) {
1205 TAILQ_REMOVE(&pcs->hdev, pcsd, entry);
1207 cuse_server_free_dev(pcsd);
1210 pcsd = TAILQ_FIRST(&pcs->hdev);
1212 pcsd = TAILQ_NEXT(pcsd, entry);
/* Move data between server and client: small peer pointers address the
 * in-kernel ioctl buffer, larger ones the client's address space. */
1219 case CUSE_IOCTL_WRITE_DATA:
1220 case CUSE_IOCTL_READ_DATA:
1223 pchk = (struct cuse_data_chunk *)data;
1225 pccmd = cuse_server_find_command(pcs, curthread);
1227 if (pccmd == NULL) {
1228 error = ENXIO; /* invalid request */
1229 } else if (pchk->peer_ptr < CUSE_BUF_MIN_PTR) {
1230 error = EFAULT; /* NULL pointer */
1231 } else if (pchk->peer_ptr < CUSE_BUF_MAX_PTR) {
1232 error = cuse_server_ioctl_copy_locked(pccmd,
1233 pchk, cmd == CUSE_IOCTL_READ_DATA);
1235 error = cuse_server_data_copy_locked(pccmd,
1236 pchk, cmd == CUSE_IOCTL_READ_DATA);
/* Server-triggered wakeup of all clients (direction unknown). */
1241 case CUSE_IOCTL_SELWAKEUP:
1244 * We don't know which direction caused the event.
1247 cuse_server_wakeup_all_client_locked(pcs);
1259 cuse_server_poll(struct cdev *dev, int events, struct thread *td)
1261 return (events & (POLLHUP | POLLPRI | POLLIN |
1262 POLLRDNORM | POLLOUT | POLLWRNORM));
/* mmap of the control node: translate an offset into one of the shared
 * memory slots, enforcing slot ownership and bounds, and return the
 * physical address of the page. */
1266 cuse_server_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
1268 uint32_t page_nr = offset / PAGE_SIZE;
1269 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1270 struct cuse_memory *mem;
1271 struct cuse_server *pcs;
1275 if (alloc_nr >= CUSE_ALLOC_UNIT_MAX)
1278 error = cuse_server_get(&pcs);
1283 mem = &cuse_mem[alloc_nr];
1285 /* try to enforce slight ownership */
1286 if ((pcs != NULL) && (mem->owner != pcs)) {
1290 if (mem->virtaddr == NULL) {
1294 if (mem->virtaddr == NBUSY) {
1298 page_nr %= CUSE_ALLOC_PAGES_MAX;
1300 if (page_nr >= mem->page_count) {
1304 ptr = mem->virtaddr + (page_nr * PAGE_SIZE);
1307 *paddr = vtophys(ptr);
1312 /*------------------------------------------------------------------------*
1314 *------------------------------------------------------------------------*/
/* cdevpriv destructor for a client: finish closing, unlink from the
 * server, destroy per-command locks, then drop the server reference. */
1316 cuse_client_free(void *arg)
1318 struct cuse_client *pcc = arg;
1319 struct cuse_client_command *pccmd;
1320 struct cuse_server *pcs;
1324 cuse_client_is_closing(pcc);
1325 TAILQ_REMOVE(&pcc->server->hcli, pcc, entry);
1328 for (n = 0; n != CUSE_CMD_MAX; n++) {
1330 pccmd = &pcc->cmds[n];
1332 sx_destroy(&pccmd->sx);
1333 cv_destroy(&pccmd->cv);
1340 /* drop reference on server */
1341 cuse_server_free(pcs);
/* Open of a server-created device: allocate a client, initialise its
 * command slots, register it, then forward CUSE_CMD_OPEN to the server. */
1345 cuse_client_open(struct cdev *dev, int fflags, int devtype, struct thread *td)
1347 struct cuse_client_command *pccmd;
1348 struct cuse_server_dev *pcsd;
1349 struct cuse_client *pcc;
1350 struct cuse_server *pcs;
1351 struct cuse_dev *pcd;
1356 pcsd = dev->si_drv1;
1359 pcd = pcsd->user_dev;
1361 if (pcs->refs < 0) {
1375 pcc = malloc(sizeof(*pcc), M_CUSE, M_WAITOK | M_ZERO);
1377 /* drop reference on server */
1378 cuse_server_free(pcs);
1381 if (devfs_set_cdevpriv(pcc, &cuse_client_free)) {
1382 printf("Cuse: Cannot set cdevpriv.\n");
1383 /* drop reference on server */
1384 cuse_server_free(pcs);
1388 pcc->fflags = fflags;
1389 pcc->server_dev = pcsd;
1392 for (n = 0; n != CUSE_CMD_MAX; n++) {
1394 pccmd = &pcc->cmds[n];
1396 pccmd->sub.dev = pcd;
1397 pccmd->sub.command = n;
1398 pccmd->client = pcc;
1400 sx_init(&pccmd->sx, "cuse-client-sx");
1401 cv_init(&pccmd->cv, "cuse-client-cv");
1406 /* cuse_client_free() assumes that the client is listed somewhere! */
1407 /* always enqueue */
1409 TAILQ_INSERT_TAIL(&pcs->hcli, pcc, entry);
1411 /* check if server is closing */
1412 if ((pcs->is_closing != 0) || (dev->si_drv1 == NULL)) {
1420 devfs_clear_cdevpriv(); /* XXX bugfix */
1423 pccmd = &pcc->cmds[CUSE_CMD_OPEN];
1425 cuse_cmd_lock(pccmd);
1428 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1430 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1434 error = cuse_convert_error(error);
1439 cuse_cmd_unlock(pccmd);
1442 devfs_clear_cdevpriv(); /* XXX bugfix */
/* Close: forward CUSE_CMD_CLOSE to the server, then start client
 * tear-down (final free happens via the cdevpriv destructor). */
1448 cuse_client_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
1450 struct cuse_client_command *pccmd;
1451 struct cuse_client *pcc;
1454 error = cuse_client_get(&pcc);
1458 pccmd = &pcc->cmds[CUSE_CMD_CLOSE];
1460 cuse_cmd_lock(pccmd);
1463 cuse_client_send_command_locked(pccmd, 0, 0, pcc->fflags, 0);
1465 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1468 cuse_cmd_unlock(pccmd);
1471 cuse_client_is_closing(pcc);
/* Refresh kqueue readiness flags by polling the userspace server, then
 * wake the knote list so filters re-evaluate. */
1478 cuse_client_kqfilter_poll(struct cdev *dev, struct cuse_client *pcc)
1483 temp = (pcc->cflags & (CUSE_CLI_KNOTE_HAS_READ |
1484 CUSE_CLI_KNOTE_HAS_WRITE));
1485 pcc->cflags &= ~(CUSE_CLI_KNOTE_NEED_READ |
1486 CUSE_CLI_KNOTE_NEED_WRITE);
1490 /* get the latest polling state from the server */
1491 temp = cuse_client_poll(dev, POLLIN | POLLOUT, NULL);
1494 if (temp & (POLLIN | POLLOUT)) {
1496 pcc->cflags |= CUSE_CLI_KNOTE_NEED_READ;
1498 pcc->cflags |= CUSE_CLI_KNOTE_NEED_WRITE;
1500 /* make sure the "knote" gets woken up */
1501 cuse_server_wakeup_locked(pcc->server);
/* read(2) on a client device: forward each iovec to the server as a
 * CUSE_CMD_READ and move the returned byte count through uiomove with
 * UIO_NOCOPY (the server already copied the payload directly). */
1508 cuse_client_read(struct cdev *dev, struct uio *uio, int ioflag)
1510 struct cuse_client_command *pccmd;
1511 struct cuse_client *pcc;
1515 error = cuse_client_get(&pcc);
1519 pccmd = &pcc->cmds[CUSE_CMD_READ];
1521 if (uio->uio_segflg != UIO_USERSPACE) {
1524 uio->uio_segflg = UIO_NOCOPY;
1526 cuse_cmd_lock(pccmd);
1528 while (uio->uio_resid != 0) {
1530 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1535 len = uio->uio_iov->iov_len;
1538 cuse_client_send_command_locked(pccmd,
1539 (unsigned long)uio->uio_iov->iov_base,
1540 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1542 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1546 error = cuse_convert_error(error);
1548 } else if (error == len) {
1549 error = uiomove(NULL, error, uio);
1553 error = uiomove(NULL, error, uio);
1557 cuse_cmd_unlock(pccmd);
1559 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1561 if (error == EWOULDBLOCK)
1562 cuse_client_kqfilter_poll(dev, pcc);
/* write(2): mirror of cuse_client_read using CUSE_CMD_WRITE. */
1568 cuse_client_write(struct cdev *dev, struct uio *uio, int ioflag)
1570 struct cuse_client_command *pccmd;
1571 struct cuse_client *pcc;
1575 error = cuse_client_get(&pcc);
1579 pccmd = &pcc->cmds[CUSE_CMD_WRITE];
1581 if (uio->uio_segflg != UIO_USERSPACE) {
1584 uio->uio_segflg = UIO_NOCOPY;
1586 cuse_cmd_lock(pccmd);
1588 while (uio->uio_resid != 0) {
1590 if (uio->uio_iov->iov_len > CUSE_LENGTH_MAX) {
1595 len = uio->uio_iov->iov_len;
1598 cuse_client_send_command_locked(pccmd,
1599 (unsigned long)uio->uio_iov->iov_base,
1600 (unsigned long)(unsigned int)len, pcc->fflags, ioflag);
1602 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1606 error = cuse_convert_error(error);
1608 } else if (error == len) {
1609 error = uiomove(NULL, error, uio);
1613 error = uiomove(NULL, error, uio);
1617 cuse_cmd_unlock(pccmd);
1619 uio->uio_segflg = UIO_USERSPACE;/* restore segment flag */
1621 if (error == EWOULDBLOCK)
1622 cuse_client_kqfilter_poll(dev, pcc);
/* ioctl(2): stage the argument in the client's in-kernel buffer, send
 * CUSE_CMD_IOCTL, then copy results back for _IOR-style commands. */
1628 cuse_client_ioctl(struct cdev *dev, unsigned long cmd,
1629 caddr_t data, int fflag, struct thread *td)
1631 struct cuse_client_command *pccmd;
1632 struct cuse_client *pcc;
1636 error = cuse_client_get(&pcc);
1640 len = IOCPARM_LEN(cmd);
1641 if (len > CUSE_BUFFER_MAX)
1644 pccmd = &pcc->cmds[CUSE_CMD_IOCTL];
1646 cuse_cmd_lock(pccmd);
1649 memcpy(pcc->ioctl_buffer, data, len);
1652 * When the ioctl-length is zero drivers can pass information
1653 * through the data pointer of the ioctl. Make sure this information
1654 * is forwarded to the driver.
1658 cuse_client_send_command_locked(pccmd,
1659 (len == 0) ? *(long *)data : CUSE_BUF_MIN_PTR,
1660 (unsigned long)cmd, pcc->fflags,
1661 (fflag & O_NONBLOCK) ? IO_NDELAY : 0);
1663 error = cuse_client_receive_command_locked(pccmd, data, len);
1667 error = cuse_convert_error(error);
1673 memcpy(data, pcc->ioctl_buffer, len);
1675 cuse_cmd_unlock(pccmd);
1677 if (error == EWOULDBLOCK)
1678 cuse_client_kqfilter_poll(dev, pcc);
/* poll(2): translate poll events to CUSE_POLL_* flags, ask the server
 * non-blockingly, and map its answer back to revents; on failure fall
 * through to the permissive mask (many clients mishandle POLLNVAL). */
1684 cuse_client_poll(struct cdev *dev, int events, struct thread *td)
1686 struct cuse_client_command *pccmd;
1687 struct cuse_client *pcc;
1692 error = cuse_client_get(&pcc);
1698 if (events & (POLLPRI | POLLIN | POLLRDNORM))
1699 temp |= CUSE_POLL_READ;
1701 if (events & (POLLOUT | POLLWRNORM))
1702 temp |= CUSE_POLL_WRITE;
1704 if (events & POLLHUP)
1705 temp |= CUSE_POLL_ERROR;
1707 pccmd = &pcc->cmds[CUSE_CMD_POLL];
1709 cuse_cmd_lock(pccmd);
1711 /* Need to selrecord() first to not loose any events. */
1712 if (temp != 0 && td != NULL)
1713 selrecord(td, &pcc->server->selinfo);
1716 cuse_client_send_command_locked(pccmd,
1717 0, temp, pcc->fflags, IO_NDELAY);
1719 error = cuse_client_receive_command_locked(pccmd, 0, 0);
1722 cuse_cmd_unlock(pccmd);
1728 if (error & CUSE_POLL_READ)
1729 revents |= (events & (POLLPRI | POLLIN | POLLRDNORM));
1730 if (error & CUSE_POLL_WRITE)
1731 revents |= (events & (POLLOUT | POLLWRNORM));
1732 if (error & CUSE_POLL_ERROR)
1733 revents |= (events & POLLHUP);
1738 /* XXX many clients don't understand POLLNVAL */
1739 return (events & (POLLHUP | POLLPRI | POLLIN |
1740 POLLRDNORM | POLLOUT | POLLWRNORM));
/* mmap on a client device: same shared-memory slot lookup as the
 * server-side mmap, but authenticated through the client handle. */
1744 cuse_client_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
1746 uint32_t page_nr = offset / PAGE_SIZE;
1747 uint32_t alloc_nr = page_nr / CUSE_ALLOC_PAGES_MAX;
1748 struct cuse_memory *mem;
1749 struct cuse_server *pcs;
1750 struct cuse_client *pcc;
1754 if (alloc_nr >= CUSE_ALLOC_UNIT_MAX)
1757 error = cuse_client_get(&pcc);
1764 mem = &cuse_mem[alloc_nr];
1766 /* try to enforce slight ownership */
1767 if ((pcs != NULL) && (mem->owner != pcs)) {
1771 if (mem->virtaddr == NULL) {
1775 if (mem->virtaddr == NBUSY) {
1779 page_nr %= CUSE_ALLOC_PAGES_MAX;
1781 if (page_nr >= mem->page_count) {
1785 ptr = mem->virtaddr + (page_nr * PAGE_SIZE);
1788 *paddr = vtophys(ptr);
1794 cuse_client_kqfilter_read_detach(struct knote *kn)
1796 struct cuse_client *pcc;
1800 knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
1805 cuse_client_kqfilter_write_detach(struct knote *kn)
1807 struct cuse_client *pcc;
1811 knlist_remove(&pcc->server->selinfo.si_note, kn, 1);
1816 cuse_client_kqfilter_read_event(struct knote *kn, long hint)
1818 struct cuse_client *pcc;
1820 mtx_assert(&cuse_mtx, MA_OWNED);
1823 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_READ) ? 1 : 0);
1827 cuse_client_kqfilter_write_event(struct knote *kn, long hint)
1829 struct cuse_client *pcc;
1831 mtx_assert(&cuse_mtx, MA_OWNED);
1834 return ((pcc->cflags & CUSE_CLI_KNOTE_NEED_WRITE) ? 1 : 0);
/* Attach a kqueue filter to a client device: register the knote on the
 * server's knlist and mark which direction is being monitored.
 * NOTE(review): this definition is truncated at the end of the visible
 * chunk; the trailing lines (presumably default case and return) are
 * missing from this extraction. */
1838 cuse_client_kqfilter(struct cdev *dev, struct knote *kn)
1840 struct cuse_client *pcc;
1841 struct cuse_server *pcs;
1844 error = cuse_client_get(&pcc);
1850 switch (kn->kn_filter) {
1852 pcc->cflags |= CUSE_CLI_KNOTE_HAS_READ;
1854 kn->kn_fop = &cuse_client_kqfilter_read_ops;
1855 knlist_add(&pcs->selinfo.si_note, kn, 1);
1858 pcc->cflags |= CUSE_CLI_KNOTE_HAS_WRITE;
1860 kn->kn_fop = &cuse_client_kqfilter_write_ops;
1861 knlist_add(&pcs->selinfo.si_note, kn, 1);
/* refresh readiness state immediately after attach */
1870 cuse_client_kqfilter_poll(dev, pcc);