2 * Copyright (c) 2010-2022 Hans Petter Selasky. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include <sys/types.h>
37 #include <sys/queue.h>
38 #include <sys/fcntl.h>
40 #include <sys/param.h>
42 #include <fs/cuse/cuse_ioctl.h>
/* Returns a human readable name for the given CUSE command code. */
49 static const char *cuse_cmd_str(int cmd);

/*
 * Debug print helper: forwards to printf() only when the global
 * cuse_debug_level is non-zero.
 * NOTE(review): the #ifdef/#else/#endif lines selecting between the
 * two DPRINTF definitions are not visible in this excerpt; the second,
 * empty definition is presumably the non-debug variant — confirm.
 */
51 #define DPRINTF(...) do { \
52 if (cuse_debug_level != 0) \
53 printf(__VA_ARGS__); \
56 #define DPRINTF(...) do { } while (0)
/*
 * Bookkeeping for one shared memory mapping obtained from /dev/cuse.
 * NOTE(review): member lines are not visible in this excerpt; later
 * code references .ptr and .size.
 */
59 struct cuse_vm_allocation {

/*
 * Tracks a thread that has "entered" a device, i.e. is currently
 * processing one kernel command for it (see cuse_wait_and_process()).
 */
64 struct cuse_dev_entered {
65 TAILQ_ENTRY(cuse_dev_entered) entry;
67 void *per_file_handle;
68 struct cuse_dev *cdev;

/* Per-device state; linked into the global h_cuse list. */
75 TAILQ_ENTRY(cuse_dev) entry;
76 const struct cuse_methods *mtod;
/* File descriptor for /dev/cuse; -1 until initialization succeeds. */
81 static int f_cuse = -1;

/* Global mutex protecting the lists and the allocation table below. */
83 static pthread_mutex_t m_cuse;
84 static TAILQ_HEAD(, cuse_dev) h_cuse __guarded_by(m_cuse);
85 static TAILQ_HEAD(, cuse_dev_entered) h_cuse_entered __guarded_by(m_cuse);
86 static struct cuse_vm_allocation a_cuse[CUSE_ALLOC_UNIT_MAX]

/* Convenience wrappers around the global mutex. */
90 pthread_mutex_lock(&m_cuse)
92 #define CUSE_UNLOCK() \
93 pthread_mutex_unlock(&m_cuse)
/*
 * Library initialization: opens /dev/cuse and sets up the global
 * recursive mutex and tracking lists.
 * NOTE(review): the function header and several lines — including the
 * open() result check that presumably guards the two error returns
 * below — are not visible in this excerpt; confirm against full file.
 */
98 pthread_mutexattr_t attr;

100 f_cuse = open("/dev/cuse", O_RDWR);
/* Distinguish "cuse kernel module not loaded" from other failures. */
102 if (feature_present("cuse") == 0)
103 return (CUSE_ERR_NOT_LOADED);
105 return (CUSE_ERR_INVALID);

/* Recursive mutex: allows nested CUSE_LOCK() on the same thread. */
107 pthread_mutexattr_init(&attr);
108 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
109 pthread_mutex_init(&m_cuse, &attr);

112 TAILQ_INIT(&h_cuse_entered);
/*
 * Library teardown: destroys the global mutex and clears the shared
 * memory allocation table.
 * NOTE(review): the function header and the guard leading to this
 * error return are not visible in this excerpt — confirm the return
 * below belongs to the teardown path.
 */
123 return (CUSE_ERR_INVALID);

130 pthread_mutex_destroy(&m_cuse);
132 memset(a_cuse, 0, sizeof(a_cuse));
/*
 * Translates a pointer inside one of the mmap()ed CUSE allocations
 * into its kernel-visible offset: (unit index << CUSE_ALLOC_UNIT_SHIFT)
 * plus the byte remainder within that allocation.
 */
138 cuse_vmoffset(void *_ptr)
143 unsigned long remainder;

/* Scan the allocation table for the range containing the pointer. */
147 for (n = remainder = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
148 if (a_cuse[n].ptr == NULL)
151 ptr_min = a_cuse[n].ptr;
152 ptr_max = a_cuse[n].ptr + a_cuse[n].size - 1;

/* Inclusive range check against [base, base + size - 1]. */
154 if ((ptr >= ptr_min) && (ptr <= ptr_max)) {
155 remainder = (ptr - ptr_min);

161 return ((n << CUSE_ALLOC_UNIT_SHIFT) + remainder);
/*
 * Allocates "size" bytes of memory shared with the CUSE kernel module.
 * The offset space is partitioned into fixed-size units of
 * (1 << CUSE_ALLOC_UNIT_SHIFT) bytes; a contiguous run of "m" free
 * units is reserved, backed via CUSE_IOCTL_ALLOC_MEMORY, and mmap()ed.
 * Returns the mapped pointer on success or NULL on failure.
 * NOTE(review): locking, loop closers and parts of the failure cleanup
 * are not visible in this excerpt.
 */
165 cuse_vmalloc(unsigned size)
167 struct cuse_alloc_info info;
168 unsigned long pgsize;

175 /* some sanity checks */
176 if (f_cuse < 0 || size < 1 || size > CUSE_ALLOC_BYTES_MAX)

179 memset(&info, 0, sizeof(info));

/* Round the request up to whole pages for the kernel. */
181 pgsize = getpagesize();
182 info.page_count = howmany(size, pgsize);

184 /* compute how many units the allocation needs */
185 m = howmany(size, 1 << CUSE_ALLOC_UNIT_SHIFT);
186 if (m == 0 || m > CUSE_ALLOC_UNIT_MAX)

/* First-fit search over the unit table. */
190 for (n = 0; n <= CUSE_ALLOC_UNIT_MAX - m; ) {
191 if (a_cuse[n].size != 0) {
192 /* skip to next available unit, depending on allocation size */
193 n += howmany(a_cuse[n].size, 1 << CUSE_ALLOC_UNIT_SHIFT);

196 /* check if there are "m" free units ahead */
197 for (x = 1; x != m; x++) {
198 if (a_cuse[n + x].size != 0)

202 /* skip to next available unit, if any */

206 /* reserve this unit by setting the size to a non-zero value */
207 a_cuse[n].size = size;

/* Ask the kernel to back the reserved range, then map it. */
212 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_MEMORY, &info);

/* Map at the file offset encoding the unit index. */
215 ptr = mmap(NULL, info.page_count * pgsize,
216 PROT_READ | PROT_WRITE,
217 MAP_SHARED, f_cuse, n << CUSE_ALLOC_UNIT_SHIFT);

219 if (ptr != MAP_FAILED) {
224 return (ptr); /* success */

/* mmap() failed: release the kernel-side backing again. */
227 (void) ioctl(f_cuse, CUSE_IOCTL_FREE_MEMORY, &info);

235 return (NULL); /* failure */
/*
 * Returns non-zero (true) when "ptr" is the base address of one of
 * the shared memory allocations made by cuse_vmalloc().
 */
239 cuse_is_vmalloc_addr(void *ptr)
243 if (f_cuse < 0 || ptr == NULL)
244 return (0); /* false */

/* Linear search for an exact base-pointer match. */
247 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
248 if (a_cuse[n].ptr == ptr)

/* True iff the loop stopped before the end of the table. */
253 return (n != CUSE_ALLOC_UNIT_MAX);
/*
 * Frees memory obtained from cuse_vmalloc(): unmaps the region and
 * releases the kernel-side backing via CUSE_IOCTL_FREE_MEMORY.
 * NOTE(review): locking and the copy of the table entry into "temp"
 * are among the lines not visible in this excerpt.
 */
257 cuse_vmfree(void *ptr)
259 struct cuse_vm_allocation temp;
260 struct cuse_alloc_info info;

264 if (f_cuse < 0 || ptr == NULL)

/* Find the allocation entry whose base address matches "ptr". */
268 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
269 if (a_cuse[n].ptr != ptr)

276 munmap(temp.ptr, temp.size);

278 memset(&info, 0, sizeof(info));

282 error = ioctl(f_cuse, CUSE_IOCTL_FREE_MEMORY, &info);

285 /* ignore any errors */
286 DPRINTF("Freeing memory failed: %d\n", errno);

/* Clear the slot so its units can be reused. */
290 a_cuse[n].ptr = NULL;
/*
 * Allocates a device unit number tied to the given ID via
 * CUSE_IOCTL_ALLOC_UNIT_BY_ID.  On success *pnum holds the allocated
 * number; a failing ioctl maps to CUSE_ERR_NO_MEMORY.
 */
299 cuse_alloc_unit_number_by_id(int *pnum, int id)
304 return (CUSE_ERR_INVALID);

/* Seed *pnum with the ID portion; the kernel fills in the unit. */
306 *pnum = (id & CUSE_ID_MASK);

308 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_UNIT_BY_ID, pnum);
310 return (CUSE_ERR_NO_MEMORY);
/*
 * Frees a unit number previously obtained from
 * cuse_alloc_unit_number_by_id().  Unless both num and id are -1, the
 * ID mask and the low byte of the unit number are combined into the
 * value passed to the kernel.
 */
317 cuse_free_unit_number_by_id(int num, int id)
322 return (CUSE_ERR_INVALID);

324 if (num != -1 || id != -1)
325 num = (id & CUSE_ID_MASK) | (num & 0xFF);

327 error = ioctl(f_cuse, CUSE_IOCTL_FREE_UNIT_BY_ID, &num);
329 return (CUSE_ERR_NO_MEMORY);
/*
 * Allocates the next free device unit number from the kernel via
 * CUSE_IOCTL_ALLOC_UNIT; stores it in *pnum on success.
 */
335 cuse_alloc_unit_number(int *pnum)
340 return (CUSE_ERR_INVALID);

342 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_UNIT, pnum);
344 return (CUSE_ERR_NO_MEMORY);
/*
 * Returns a unit number obtained from cuse_alloc_unit_number() back
 * to the kernel via CUSE_IOCTL_FREE_UNIT.
 */
350 cuse_free_unit_number(int num)
355 return (CUSE_ERR_INVALID);

357 error = ioctl(f_cuse, CUSE_IOCTL_FREE_UNIT, &num);
359 return (CUSE_ERR_NO_MEMORY);
/*
 * Creates a new character device backed by the given method table
 * "mtod".  The device node name is built printf()-style from "_fmt"
 * and the variable arguments; ownership and permissions come from
 * _uid, _gid and _perms.  priv0/priv1 are opaque caller pointers.
 * NOTE(review): error paths (malloc failure, ioctl failure) and the
 * va_end() call are not visible in this excerpt.
 */
365 cuse_dev_create(const struct cuse_methods *mtod, void *priv0, void *priv1,
366 uid_t _uid, gid_t _gid, int _perms, const char *_fmt,...)
368 struct cuse_create_dev info;
369 struct cuse_dev *cdev;

376 cdev = malloc(sizeof(*cdev));
380 memset(cdev, 0, sizeof(*cdev));

386 memset(&info, 0, sizeof(info));
390 info.group_id = _gid;
391 info.permissions = _perms;

/* Format the device node name into the ioctl request. */
393 va_start(args, _fmt);
394 vsnprintf(info.devname, sizeof(info.devname), _fmt, args);

397 error = ioctl(f_cuse, CUSE_IOCTL_CREATE_DEV, &info);

/* Track the new device on the global list. */
403 TAILQ_INSERT_TAIL(&h_cuse, cdev, entry);
/*
 * Destroys a device created by cuse_dev_create(): unlinks it from the
 * global list and tells the kernel to remove the device node.  The
 * ioctl is passed the address of the handle (&cdev); presumably the
 * kernel identifies the device by the user-space pointer value —
 * confirm against the cuse_ioctl.h contract.
 */
411 cuse_dev_destroy(struct cuse_dev *cdev)
419 TAILQ_REMOVE(&h_cuse, cdev, entry);

422 error = ioctl(f_cuse, CUSE_IOCTL_DESTROY_DEV, &cdev);
/* Accessors for the two opaque per-device caller pointers. */
430 cuse_dev_get_priv0(struct cuse_dev *cdev)
432 return (cdev->priv0);

436 cuse_dev_get_priv1(struct cuse_dev *cdev)
438 return (cdev->priv1);

/* Setter bodies are not visible in this excerpt. */
442 cuse_dev_set_priv0(struct cuse_dev *cdev, void *priv)

448 cuse_dev_set_priv1(struct cuse_dev *cdev, void *priv)
/*
 * Worker-loop body: fetches one command from the kernel via
 * CUSE_IOCTL_GET_COMMAND, records this thread on the "entered" list,
 * dispatches to the device's method table, and finally reports the
 * result back with CUSE_IOCTL_SYNC_COMMAND.  Intended to be called
 * repeatedly from one or more worker threads.
 * NOTE(review): many lines (locking, case labels, break statements)
 * are not visible in this excerpt; comments below describe only what
 * the visible code shows.
 */
454 cuse_wait_and_process(void)
456 pthread_t curr = pthread_self();
457 struct cuse_dev_entered *pe;
458 struct cuse_dev_entered enter;
459 struct cuse_command info;
460 struct cuse_dev *cdev;

464 return (CUSE_ERR_INVALID);

/* Block until the kernel hands us a command to process. */
466 error = ioctl(f_cuse, CUSE_IOCTL_GET_COMMAND, &info);
468 return (CUSE_ERR_OTHER);

/* Record this thread as processing the command. */
474 enter.per_file_handle = (void *)info.per_file_handle;
475 enter.cmd = info.command;
477 enter.got_signal = 0;

479 TAILQ_INSERT_TAIL(&h_cuse_entered, &enter, entry);

482 DPRINTF("cuse: Command = %d = %s, flags = %d, arg = 0x%08x, ptr = 0x%08x\n",
483 (int)info.command, cuse_cmd_str(info.command), (int)info.fflags,
484 (int)info.argument, (int)info.data_pointer);

486 switch (info.command) {
/* Open: the cm_open callback is optional. */
488 if (cdev->mtod->cm_open != NULL)
489 error = (cdev->mtod->cm_open) (cdev, (int)info.fflags);

496 /* wait for other threads to stop */
/* Interrupt sibling threads working on the same file handle. */
503 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
504 if (pe->cdev != cdev)
506 if (pe->thread == curr)
508 if (pe->per_file_handle !=
509 enter.per_file_handle)
512 pthread_kill(pe->thread, SIGHUP);
513 error = CUSE_ERR_BUSY;

/* Close: the cm_close callback is optional. */
523 if (cdev->mtod->cm_close != NULL)
524 error = (cdev->mtod->cm_close) (cdev, (int)info.fflags);

/* Read: cm_read must be provided, else CUSE_ERR_INVALID. */
530 if (cdev->mtod->cm_read != NULL) {
531 error = (cdev->mtod->cm_read) (cdev, (int)info.fflags,
532 (void *)info.data_pointer, (int)info.argument);
534 error = CUSE_ERR_INVALID;

/* Write: cm_write must be provided, else CUSE_ERR_INVALID. */
539 if (cdev->mtod->cm_write != NULL) {
540 error = (cdev->mtod->cm_write) (cdev, (int)info.fflags,
541 (void *)info.data_pointer, (int)info.argument);
543 error = CUSE_ERR_INVALID;

/* Ioctl: cm_ioctl must be provided, else CUSE_ERR_INVALID. */
548 if (cdev->mtod->cm_ioctl != NULL) {
549 error = (cdev->mtod->cm_ioctl) (cdev, (int)info.fflags,
550 (unsigned int)info.argument, (void *)info.data_pointer);
552 error = CUSE_ERR_INVALID;

/* Poll: cm_poll must be provided, else CUSE_POLL_ERROR. */
557 if (cdev->mtod->cm_poll != NULL) {
558 error = (cdev->mtod->cm_poll) (cdev, (int)info.fflags,
561 error = CUSE_POLL_ERROR;

/* Signal: forward SIGHUP to peer threads on the same handle. */
565 case CUSE_CMD_SIGNAL:
567 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
568 if (pe->cdev != cdev)
570 if (pe->thread == curr)
572 if (pe->per_file_handle !=
573 enter.per_file_handle)
576 pthread_kill(pe->thread, SIGHUP);

582 error = CUSE_ERR_INVALID;

586 DPRINTF("cuse: Command error = %d for %s\n",
587 error, cuse_cmd_str(info.command));

/* Done processing: unlink this thread's entry. */
590 TAILQ_REMOVE(&h_cuse_entered, &enter, entry);

593 /* we ignore any sync command failures */
594 ioctl(f_cuse, CUSE_IOCTL_SYNC_COMMAND, &error);
/*
 * Returns the "entered" record for the calling thread, i.e. the entry
 * placed on h_cuse_entered by cuse_wait_and_process(), or (presumably)
 * NULL when the caller is not currently processing a command — the
 * fallthrough return is not visible in this excerpt.
 */
599 static struct cuse_dev_entered *
600 cuse_dev_get_entered(void)
602 struct cuse_dev_entered *pe;
603 pthread_t curr = pthread_self();

/* Match entries by thread identity. */
606 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
607 if (pe->thread == curr)
/*
 * Associates an opaque per-file-descriptor handle with the current
 * command context.  Must be called from a command handler for the
 * same device; stores the handle locally and pushes it to the kernel
 * via CUSE_IOCTL_SET_PFH.
 */
615 cuse_dev_set_per_file_handle(struct cuse_dev *cdev, void *handle)
617 struct cuse_dev_entered *pe;

/* Only valid from within a handler for this device. */
619 pe = cuse_dev_get_entered();
620 if (pe == NULL || pe->cdev != cdev)

623 pe->per_file_handle = handle;
624 ioctl(f_cuse, CUSE_IOCTL_SET_PFH, &handle);
/*
 * Returns the per-file-descriptor handle of the current command
 * context, when called from a handler for the given device.
 */
628 cuse_dev_get_per_file_handle(struct cuse_dev *cdev)
630 struct cuse_dev_entered *pe;

632 pe = cuse_dev_get_entered();
633 if (pe == NULL || pe->cdev != cdev)

636 return (pe->per_file_handle);
/*
 * Marks the current command context as "local" (in-process data
 * transfer) or not.  NOTE(review): the assignment of "val" and the
 * NULL check are not visible in this excerpt.
 */
640 cuse_set_local(int val)
642 struct cuse_dev_entered *pe;

644 pe = cuse_dev_get_entered();
/*
 * Maps a CUSE command code to a short human readable name for debug
 * output.  Out-of-range or unnamed codes fall through to a default
 * string (fallthrough return not visible in this excerpt).
 */
653 cuse_cmd_str(int cmd)
655 static const char *str[CUSE_CMD_MAX] = {
656 [CUSE_CMD_NONE] = "none",
657 [CUSE_CMD_OPEN] = "open",
658 [CUSE_CMD_CLOSE] = "close",
659 [CUSE_CMD_READ] = "read",
660 [CUSE_CMD_WRITE] = "write",
661 [CUSE_CMD_IOCTL] = "ioctl",
662 [CUSE_CMD_POLL] = "poll",
663 [CUSE_CMD_SIGNAL] = "signal",
664 [CUSE_CMD_SYNC] = "sync",

/* Bounds check before indexing the lookup table. */
667 if ((cmd >= 0) && (cmd < CUSE_CMD_MAX) &&
/*
 * Returns the "local" flag of the current command context.
 * NOTE(review): the function header and the NULL-check on "pe" are
 * not visible in this excerpt.
 */
679 struct cuse_dev_entered *pe;

681 pe = cuse_dev_get_entered();

685 return (pe->is_local);
/*
 * Copies "len" bytes from local memory "src" to the peer process
 * address "user_dst".  For local (in-process) transfers a plain
 * memcpy() is used; otherwise the kernel moves the data via
 * CUSE_IOCTL_WRITE_DATA.  Returns CUSE_ERR_INVALID on bad arguments
 * or when not called from a command handler, CUSE_ERR_FAULT when the
 * kernel copy fails.
 */
689 cuse_copy_out(const void *src, void *user_dst, int len)
691 struct cuse_data_chunk info;
692 struct cuse_dev_entered *pe;

695 if ((f_cuse < 0) || (len < 0))
696 return (CUSE_ERR_INVALID);

/* Must be called from within a command handler thread. */
698 pe = cuse_dev_get_entered();
700 return (CUSE_ERR_INVALID);

702 DPRINTF("cuse: copy_out(%p,%p,%d), cmd = %d = %s\n",
703 src, user_dst, len, pe->cmd, cuse_cmd_str(pe->cmd));

/* Local transfer path (the guarding test is not in this excerpt). */
706 memcpy(user_dst, src, len);

/* Remote transfer: describe the chunk and let the kernel copy. */
708 info.local_ptr = (uintptr_t)src;
709 info.peer_ptr = (uintptr_t)user_dst;

712 error = ioctl(f_cuse, CUSE_IOCTL_WRITE_DATA, &info);
714 DPRINTF("cuse: copy_out() error = %d\n", errno);
715 return (CUSE_ERR_FAULT);
/*
 * Copies "len" bytes from the peer process address "user_src" into
 * local memory "dst" — the mirror image of cuse_copy_out().  Local
 * transfers use memcpy(); remote transfers go through the kernel via
 * CUSE_IOCTL_READ_DATA.  Same error conventions as cuse_copy_out().
 */
722 cuse_copy_in(const void *user_src, void *dst, int len)
724 struct cuse_data_chunk info;
725 struct cuse_dev_entered *pe;

728 if ((f_cuse < 0) || (len < 0))
729 return (CUSE_ERR_INVALID);

/* Must be called from within a command handler thread. */
731 pe = cuse_dev_get_entered();
733 return (CUSE_ERR_INVALID);

735 DPRINTF("cuse: copy_in(%p,%p,%d), cmd = %d = %s\n",
736 user_src, dst, len, pe->cmd, cuse_cmd_str(pe->cmd));

/* Local transfer path (the guarding test is not in this excerpt). */
739 memcpy(dst, user_src, len);

/* Remote transfer: describe the chunk and let the kernel copy. */
741 info.local_ptr = (uintptr_t)dst;
742 info.peer_ptr = (uintptr_t)user_src;

745 error = ioctl(f_cuse, CUSE_IOCTL_READ_DATA, &info);
747 DPRINTF("cuse: copy_in() error = %d\n", errno);
748 return (CUSE_ERR_FAULT);
/*
 * Returns the device of the current command context; when "pcmd" is
 * non-NULL presumably the current command code is stored there too —
 * the assignment lines are not visible in this excerpt.
 */
755 cuse_dev_get_current(int *pcmd)
757 struct cuse_dev_entered *pe;

759 pe = cuse_dev_get_entered();
/*
 * Queries whether the peer delivered a signal to the current command
 * context.  Returns CUSE_ERR_INVALID when not inside a handler and
 * CUSE_ERR_OTHER when no signal is pending; the "got_signal" test is
 * among the lines not visible in this excerpt.
 */
771 cuse_got_peer_signal(void)
773 struct cuse_dev_entered *pe;

775 pe = cuse_dev_get_entered();
777 return (CUSE_ERR_INVALID);

782 return (CUSE_ERR_OTHER);
/*
 * Wakes up poll()/select() waiters on this process's CUSE devices by
 * issuing CUSE_IOCTL_SELWAKEUP to the kernel.
 */
786 cuse_poll_wakeup(void)
793 ioctl(f_cuse, CUSE_IOCTL_SELWAKEUP, &error);