/*-
 * Copyright (c) 2010-2022 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <fs/cuse/cuse_ioctl.h>

#include "cuse.h"
50 static const char *cuse_cmd_str(int cmd);
52 #define DPRINTF(...) do { \
53 if (cuse_debug_level != 0) \
54 printf(__VA_ARGS__); \
57 #define DPRINTF(...) do { } while (0)
60 struct cuse_vm_allocation {
65 struct cuse_dev_entered {
66 TAILQ_ENTRY(cuse_dev_entered) entry;
68 void *per_file_handle;
69 struct cuse_dev *cdev;
76 TAILQ_ENTRY(cuse_dev) entry;
77 const struct cuse_methods *mtod;
82 static int f_cuse = -1;
84 static pthread_mutex_t m_cuse;
85 static TAILQ_HEAD(, cuse_dev) h_cuse __guarded_by(m_cuse);
86 static TAILQ_HEAD(, cuse_dev_entered) h_cuse_entered __guarded_by(m_cuse);
87 static struct cuse_vm_allocation a_cuse[CUSE_ALLOC_UNIT_MAX]
91 pthread_mutex_lock(&m_cuse)
93 #define CUSE_UNLOCK() \
94 pthread_mutex_unlock(&m_cuse)
99 pthread_mutexattr_t attr;
101 f_cuse = open("/dev/cuse", O_RDWR);
103 if (feature_present("cuse") == 0)
104 return (CUSE_ERR_NOT_LOADED);
106 return (CUSE_ERR_INVALID);
108 pthread_mutexattr_init(&attr);
109 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
110 pthread_mutex_init(&m_cuse, &attr);
113 TAILQ_INIT(&h_cuse_entered);
124 return (CUSE_ERR_INVALID);
131 pthread_mutex_destroy(&m_cuse);
133 memset(a_cuse, 0, sizeof(a_cuse));
139 cuse_vmoffset(void *_ptr)
144 unsigned long remainder;
148 for (n = remainder = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
149 if (a_cuse[n].ptr == NULL)
152 ptr_min = a_cuse[n].ptr;
153 ptr_max = a_cuse[n].ptr + a_cuse[n].size - 1;
155 if ((ptr >= ptr_min) && (ptr <= ptr_max)) {
156 remainder = (ptr - ptr_min);
162 return ((n << CUSE_ALLOC_UNIT_SHIFT) + remainder);
166 cuse_vmalloc(unsigned size)
168 struct cuse_alloc_info info;
169 unsigned long pgsize;
176 /* some sanity checks */
177 if (f_cuse < 0 || size < 1 || size > CUSE_ALLOC_BYTES_MAX)
180 memset(&info, 0, sizeof(info));
182 pgsize = getpagesize();
183 info.page_count = howmany(size, pgsize);
185 /* compute how many units the allocation needs */
186 m = howmany(size, 1 << CUSE_ALLOC_UNIT_SHIFT);
187 if (m == 0 || m > CUSE_ALLOC_UNIT_MAX)
191 for (n = 0; n <= CUSE_ALLOC_UNIT_MAX - m; ) {
192 if (a_cuse[n].size != 0) {
193 /* skip to next available unit, depending on allocation size */
194 n += howmany(a_cuse[n].size, 1 << CUSE_ALLOC_UNIT_SHIFT);
197 /* check if there are "m" free units ahead */
198 for (x = 1; x != m; x++) {
199 if (a_cuse[n + x].size != 0)
203 /* skip to next available unit, if any */
207 /* reserve this unit by setting the size to a non-zero value */
208 a_cuse[n].size = size;
213 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_MEMORY, &info);
216 ptr = mmap(NULL, info.page_count * pgsize,
217 PROT_READ | PROT_WRITE,
218 MAP_SHARED, f_cuse, n << CUSE_ALLOC_UNIT_SHIFT);
220 if (ptr != MAP_FAILED) {
225 return (ptr); /* success */
228 (void) ioctl(f_cuse, CUSE_IOCTL_FREE_MEMORY, &info);
236 return (NULL); /* failure */
240 cuse_is_vmalloc_addr(void *ptr)
244 if (f_cuse < 0 || ptr == NULL)
245 return (0); /* false */
248 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
249 if (a_cuse[n].ptr == ptr)
254 return (n != CUSE_ALLOC_UNIT_MAX);
258 cuse_vmfree(void *ptr)
260 struct cuse_vm_allocation temp;
261 struct cuse_alloc_info info;
265 if (f_cuse < 0 || ptr == NULL)
269 for (n = 0; n != CUSE_ALLOC_UNIT_MAX; n++) {
270 if (a_cuse[n].ptr != ptr)
277 munmap(temp.ptr, temp.size);
279 memset(&info, 0, sizeof(info));
283 error = ioctl(f_cuse, CUSE_IOCTL_FREE_MEMORY, &info);
286 /* ignore any errors */
287 DPRINTF("Freeing memory failed: %d\n", errno);
291 a_cuse[n].ptr = NULL;
300 cuse_alloc_unit_number_by_id(int *pnum, int id)
305 return (CUSE_ERR_INVALID);
307 *pnum = (id & CUSE_ID_MASK);
309 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_UNIT_BY_ID, pnum);
311 return (CUSE_ERR_NO_MEMORY);
318 cuse_free_unit_number_by_id(int num, int id)
323 return (CUSE_ERR_INVALID);
325 if (num != -1 || id != -1)
326 num = (id & CUSE_ID_MASK) | (num & 0xFF);
328 error = ioctl(f_cuse, CUSE_IOCTL_FREE_UNIT_BY_ID, &num);
330 return (CUSE_ERR_NO_MEMORY);
336 cuse_alloc_unit_number(int *pnum)
341 return (CUSE_ERR_INVALID);
343 error = ioctl(f_cuse, CUSE_IOCTL_ALLOC_UNIT, pnum);
345 return (CUSE_ERR_NO_MEMORY);
351 cuse_free_unit_number(int num)
356 return (CUSE_ERR_INVALID);
358 error = ioctl(f_cuse, CUSE_IOCTL_FREE_UNIT, &num);
360 return (CUSE_ERR_NO_MEMORY);
366 cuse_dev_create(const struct cuse_methods *mtod, void *priv0, void *priv1,
367 uid_t _uid, gid_t _gid, int _perms, const char *_fmt,...)
369 struct cuse_create_dev info;
370 struct cuse_dev *cdev;
377 cdev = malloc(sizeof(*cdev));
381 memset(cdev, 0, sizeof(*cdev));
387 memset(&info, 0, sizeof(info));
391 info.group_id = _gid;
392 info.permissions = _perms;
394 va_start(args, _fmt);
395 vsnprintf(info.devname, sizeof(info.devname), _fmt, args);
398 error = ioctl(f_cuse, CUSE_IOCTL_CREATE_DEV, &info);
404 TAILQ_INSERT_TAIL(&h_cuse, cdev, entry);
412 cuse_dev_destroy(struct cuse_dev *cdev)
420 TAILQ_REMOVE(&h_cuse, cdev, entry);
423 error = ioctl(f_cuse, CUSE_IOCTL_DESTROY_DEV, &cdev);
431 cuse_dev_get_priv0(struct cuse_dev *cdev)
433 return (cdev->priv0);
437 cuse_dev_get_priv1(struct cuse_dev *cdev)
439 return (cdev->priv1);
443 cuse_dev_set_priv0(struct cuse_dev *cdev, void *priv)
449 cuse_dev_set_priv1(struct cuse_dev *cdev, void *priv)
455 cuse_wait_and_process(void)
457 pthread_t curr = pthread_self();
458 struct cuse_dev_entered *pe;
459 struct cuse_dev_entered enter;
460 struct cuse_command info;
461 struct cuse_dev *cdev;
465 return (CUSE_ERR_INVALID);
467 error = ioctl(f_cuse, CUSE_IOCTL_GET_COMMAND, &info);
469 return (CUSE_ERR_OTHER);
475 enter.per_file_handle = (void *)info.per_file_handle;
476 enter.cmd = info.command;
478 enter.got_signal = 0;
480 TAILQ_INSERT_TAIL(&h_cuse_entered, &enter, entry);
483 DPRINTF("cuse: Command = %d = %s, flags = %d, arg = 0x%08x, ptr = 0x%08x\n",
484 (int)info.command, cuse_cmd_str(info.command), (int)info.fflags,
485 (int)info.argument, (int)info.data_pointer);
487 switch (info.command) {
489 if (cdev->mtod->cm_open != NULL)
490 error = (cdev->mtod->cm_open) (cdev, (int)info.fflags);
497 /* wait for other threads to stop */
504 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
505 if (pe->cdev != cdev)
507 if (pe->thread == curr)
509 if (pe->per_file_handle !=
510 enter.per_file_handle)
513 pthread_kill(pe->thread, SIGHUP);
514 error = CUSE_ERR_BUSY;
524 if (cdev->mtod->cm_close != NULL)
525 error = (cdev->mtod->cm_close) (cdev, (int)info.fflags);
531 if (cdev->mtod->cm_read != NULL) {
532 error = (cdev->mtod->cm_read) (cdev, (int)info.fflags,
533 (void *)info.data_pointer, (int)info.argument);
535 error = CUSE_ERR_INVALID;
540 if (cdev->mtod->cm_write != NULL) {
541 error = (cdev->mtod->cm_write) (cdev, (int)info.fflags,
542 (void *)info.data_pointer, (int)info.argument);
544 error = CUSE_ERR_INVALID;
549 if (cdev->mtod->cm_ioctl != NULL) {
550 error = (cdev->mtod->cm_ioctl) (cdev, (int)info.fflags,
551 (unsigned int)info.argument, (void *)info.data_pointer);
553 error = CUSE_ERR_INVALID;
558 if (cdev->mtod->cm_poll != NULL) {
559 error = (cdev->mtod->cm_poll) (cdev, (int)info.fflags,
562 error = CUSE_POLL_ERROR;
566 case CUSE_CMD_SIGNAL:
568 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
569 if (pe->cdev != cdev)
571 if (pe->thread == curr)
573 if (pe->per_file_handle !=
574 enter.per_file_handle)
577 pthread_kill(pe->thread, SIGHUP);
583 error = CUSE_ERR_INVALID;
587 DPRINTF("cuse: Command error = %d for %s\n",
588 error, cuse_cmd_str(info.command));
591 TAILQ_REMOVE(&h_cuse_entered, &enter, entry);
594 /* we ignore any sync command failures */
595 ioctl(f_cuse, CUSE_IOCTL_SYNC_COMMAND, &error);
600 static struct cuse_dev_entered *
601 cuse_dev_get_entered(void)
603 struct cuse_dev_entered *pe;
604 pthread_t curr = pthread_self();
607 TAILQ_FOREACH(pe, &h_cuse_entered, entry) {
608 if (pe->thread == curr)
616 cuse_dev_set_per_file_handle(struct cuse_dev *cdev, void *handle)
618 struct cuse_dev_entered *pe;
620 pe = cuse_dev_get_entered();
621 if (pe == NULL || pe->cdev != cdev)
624 pe->per_file_handle = handle;
625 ioctl(f_cuse, CUSE_IOCTL_SET_PFH, &handle);
629 cuse_dev_get_per_file_handle(struct cuse_dev *cdev)
631 struct cuse_dev_entered *pe;
633 pe = cuse_dev_get_entered();
634 if (pe == NULL || pe->cdev != cdev)
637 return (pe->per_file_handle);
641 cuse_set_local(int val)
643 struct cuse_dev_entered *pe;
645 pe = cuse_dev_get_entered();
654 cuse_cmd_str(int cmd)
656 static const char *str[CUSE_CMD_MAX] = {
657 [CUSE_CMD_NONE] = "none",
658 [CUSE_CMD_OPEN] = "open",
659 [CUSE_CMD_CLOSE] = "close",
660 [CUSE_CMD_READ] = "read",
661 [CUSE_CMD_WRITE] = "write",
662 [CUSE_CMD_IOCTL] = "ioctl",
663 [CUSE_CMD_POLL] = "poll",
664 [CUSE_CMD_SIGNAL] = "signal",
665 [CUSE_CMD_SYNC] = "sync",
668 if ((cmd >= 0) && (cmd < CUSE_CMD_MAX) &&
680 struct cuse_dev_entered *pe;
682 pe = cuse_dev_get_entered();
686 return (pe->is_local);
690 cuse_copy_out(const void *src, void *user_dst, int len)
692 struct cuse_data_chunk info;
693 struct cuse_dev_entered *pe;
696 if ((f_cuse < 0) || (len < 0))
697 return (CUSE_ERR_INVALID);
699 pe = cuse_dev_get_entered();
701 return (CUSE_ERR_INVALID);
703 DPRINTF("cuse: copy_out(%p,%p,%d), cmd = %d = %s\n",
704 src, user_dst, len, pe->cmd, cuse_cmd_str(pe->cmd));
707 memcpy(user_dst, src, len);
709 info.local_ptr = (uintptr_t)src;
710 info.peer_ptr = (uintptr_t)user_dst;
713 error = ioctl(f_cuse, CUSE_IOCTL_WRITE_DATA, &info);
715 DPRINTF("cuse: copy_out() error = %d\n", errno);
716 return (CUSE_ERR_FAULT);
723 cuse_copy_in(const void *user_src, void *dst, int len)
725 struct cuse_data_chunk info;
726 struct cuse_dev_entered *pe;
729 if ((f_cuse < 0) || (len < 0))
730 return (CUSE_ERR_INVALID);
732 pe = cuse_dev_get_entered();
734 return (CUSE_ERR_INVALID);
736 DPRINTF("cuse: copy_in(%p,%p,%d), cmd = %d = %s\n",
737 user_src, dst, len, pe->cmd, cuse_cmd_str(pe->cmd));
740 memcpy(dst, user_src, len);
742 info.local_ptr = (uintptr_t)dst;
743 info.peer_ptr = (uintptr_t)user_src;
746 error = ioctl(f_cuse, CUSE_IOCTL_READ_DATA, &info);
748 DPRINTF("cuse: copy_in() error = %d\n", errno);
749 return (CUSE_ERR_FAULT);
756 cuse_dev_get_current(int *pcmd)
758 struct cuse_dev_entered *pe;
760 pe = cuse_dev_get_entered();
772 cuse_got_peer_signal(void)
774 struct cuse_dev_entered *pe;
776 pe = cuse_dev_get_entered();
778 return (CUSE_ERR_INVALID);
783 return (CUSE_ERR_OTHER);
787 cuse_poll_wakeup(void)
794 ioctl(f_cuse, CUSE_IOCTL_SELWAKEUP, &error);