2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/refcount.h>
#include <sys/mutex.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/stack.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
90 #if defined(__i386__) || defined(__amd64__)
94 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");
97 SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN,
98 &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable.");
100 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
102 #include <linux/rbtree.h>
103 /* Undo Linux compat changes. */
107 #define RB_ROOT(head) (head)->rbh_root
109 static void linux_cdev_deref(struct linux_cdev *ldev);
110 static struct vm_area_struct *linux_cdev_handle_find(void *handle);
112 struct kobject linux_class_root;
113 struct device linux_root_device;
114 struct class linux_class_misc;
115 struct list_head pci_drivers;
116 struct list_head pci_devices;
119 unsigned long linux_timer_hz_mask;
122 panic_cmp(struct rb_node *one, struct rb_node *two)
127 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
130 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
140 if (old && fmt == NULL)
143 /* compute length of string */
144 va_copy(tmp_va, args);
145 len = vsnprintf(&dummy, 0, fmt, tmp_va);
148 /* account for zero termination */
151 /* check for error */
155 /* allocate memory for string */
156 name = kzalloc(len, GFP_KERNEL);
159 vsnprintf(name, len, fmt, args);
162 /* free old string */
165 /* filter new string */
166 for (; *name != '\0'; name++)
173 kobject_set_name(struct kobject *kobj, const char *fmt, ...)
179 error = kobject_set_name_vargs(kobj, fmt, args);
186 kobject_add_complete(struct kobject *kobj, struct kobject *parent)
188 const struct kobj_type *t;
191 kobj->parent = parent;
192 error = sysfs_create_dir(kobj);
193 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
194 struct attribute **attr;
197 for (attr = t->default_attrs; *attr != NULL; attr++) {
198 error = sysfs_create_file(kobj, *attr);
203 sysfs_remove_dir(kobj);
210 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
216 error = kobject_set_name_vargs(kobj, fmt, args);
221 return kobject_add_complete(kobj, parent);
225 linux_kobject_release(struct kref *kref)
227 struct kobject *kobj;
230 kobj = container_of(kref, struct kobject, kref);
231 sysfs_remove_dir(kobj);
233 if (kobj->ktype && kobj->ktype->release)
234 kobj->ktype->release(kobj);
239 linux_kobject_kfree(struct kobject *kobj)
245 linux_kobject_kfree_name(struct kobject *kobj)
252 const struct kobj_type linux_kfree_type = {
253 .release = linux_kobject_kfree
257 linux_device_release(struct device *dev)
259 pr_debug("linux_device_release: %s\n", dev_name(dev));
264 linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
266 struct class_attribute *dattr;
269 dattr = container_of(attr, struct class_attribute, attr);
272 error = dattr->show(container_of(kobj, struct class, kobj),
278 linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
281 struct class_attribute *dattr;
284 dattr = container_of(attr, struct class_attribute, attr);
287 error = dattr->store(container_of(kobj, struct class, kobj),
293 linux_class_release(struct kobject *kobj)
297 class = container_of(kobj, struct class, kobj);
298 if (class->class_release)
299 class->class_release(class);
302 static const struct sysfs_ops linux_class_sysfs = {
303 .show = linux_class_show,
304 .store = linux_class_store,
307 const struct kobj_type linux_class_ktype = {
308 .release = linux_class_release,
309 .sysfs_ops = &linux_class_sysfs
313 linux_dev_release(struct kobject *kobj)
317 dev = container_of(kobj, struct device, kobj);
/* This is the precedence defined by Linux. */
321 else if (dev->class && dev->class->dev_release)
322 dev->class->dev_release(dev);
326 linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
328 struct device_attribute *dattr;
331 dattr = container_of(attr, struct device_attribute, attr);
334 error = dattr->show(container_of(kobj, struct device, kobj),
340 linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
343 struct device_attribute *dattr;
346 dattr = container_of(attr, struct device_attribute, attr);
349 error = dattr->store(container_of(kobj, struct device, kobj),
354 static const struct sysfs_ops linux_dev_sysfs = {
355 .show = linux_dev_show,
356 .store = linux_dev_store,
359 const struct kobj_type linux_dev_ktype = {
360 .release = linux_dev_release,
361 .sysfs_ops = &linux_dev_sysfs
365 device_create(struct class *class, struct device *parent, dev_t devt,
366 void *drvdata, const char *fmt, ...)
371 dev = kzalloc(sizeof(*dev), M_WAITOK);
372 dev->parent = parent;
375 dev->driver_data = drvdata;
376 dev->release = linux_device_release;
378 kobject_set_name_vargs(&dev->kobj, fmt, args);
380 device_register(dev);
386 kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
387 struct kobject *parent, const char *fmt, ...)
392 kobject_init(kobj, ktype);
394 kobj->parent = parent;
398 error = kobject_set_name_vargs(kobj, fmt, args);
402 return kobject_add_complete(kobj, parent);
406 linux_kq_lock(void *arg)
413 linux_kq_unlock(void *arg)
421 linux_kq_lock_owned(void *arg)
426 mtx_assert(&s->m, MA_OWNED);
431 linux_kq_lock_unowned(void *arg)
436 mtx_assert(&s->m, MA_NOTOWNED);
441 linux_file_kqfilter_poll(struct linux_file *, int);
444 linux_file_alloc(void)
446 struct linux_file *filp;
448 filp = kzalloc(sizeof(*filp), GFP_KERNEL);
450 /* set initial refcount */
453 /* setup fields needed by kqueue support */
454 spin_lock_init(&filp->f_kqlock);
455 knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
456 linux_kq_lock, linux_kq_unlock,
457 linux_kq_lock_owned, linux_kq_lock_unowned);
463 linux_file_free(struct linux_file *filp)
465 if (filp->_file == NULL) {
466 if (filp->f_shmem != NULL)
467 vm_object_deallocate(filp->f_shmem);
471 * The close method of the character device or file
472 * will free the linux_file structure:
474 _fdrop(filp->_file, curthread);
479 linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
482 struct vm_area_struct *vmap;
484 vmap = linux_cdev_handle_find(vm_obj->handle);
487 MPASS(vmap->vm_private_data == vm_obj->handle);
489 if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
490 vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
493 if (((*mres)->flags & PG_FICTITIOUS) != 0) {
495 * If the passed in result page is a fake
496 * page, update it with the new physical
500 vm_page_updatefake(page, paddr, vm_obj->memattr);
 * Replace the passed in "mres" page with our
 * own fake page and free up all of the
507 VM_OBJECT_WUNLOCK(vm_obj);
508 page = vm_page_getfake(paddr, vm_obj->memattr);
509 VM_OBJECT_WLOCK(vm_obj);
511 vm_page_replace_checked(page, vm_obj,
512 (*mres)->pindex, *mres);
516 vm_page_unlock(*mres);
519 page->valid = VM_PAGE_BITS_ALL;
520 return (VM_PAGER_OK);
522 return (VM_PAGER_FAIL);
526 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
527 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
529 struct vm_area_struct *vmap;
532 linux_set_current(curthread);
534 /* get VM area structure */
535 vmap = linux_cdev_handle_find(vm_obj->handle);
537 MPASS(vmap->vm_private_data == vm_obj->handle);
539 VM_OBJECT_WUNLOCK(vm_obj);
541 down_write(&vmap->vm_mm->mmap_sem);
542 if (unlikely(vmap->vm_ops == NULL)) {
543 err = VM_FAULT_SIGBUS;
547 /* fill out VM fault structure */
548 vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
549 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
554 vmap->vm_pfn_count = 0;
555 vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
556 vmap->vm_obj = vm_obj;
558 err = vmap->vm_ops->fault(vmap, &vmf);
560 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
561 kern_yield(PRI_USER);
562 err = vmap->vm_ops->fault(vmap, &vmf);
566 /* translate return code */
569 err = VM_PAGER_AGAIN;
571 case VM_FAULT_SIGBUS:
574 case VM_FAULT_NOPAGE:
576 * By contract the fault handler will return having
577 * busied all the pages itself. If pidx is already
578 * found in the object, it will simply xbusy the first
579 * page and return with vm_pfn_count set to 1.
581 *first = vmap->vm_pfn_first;
582 *last = *first + vmap->vm_pfn_count - 1;
586 err = VM_PAGER_ERROR;
589 up_write(&vmap->vm_mm->mmap_sem);
590 VM_OBJECT_WLOCK(vm_obj);
594 static struct rwlock linux_vma_lock;
595 static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
596 TAILQ_HEAD_INITIALIZER(linux_vma_head);
599 linux_cdev_handle_free(struct vm_area_struct *vmap)
601 /* Drop reference on vm_file */
602 if (vmap->vm_file != NULL)
605 /* Drop reference on mm_struct */
612 linux_cdev_handle_remove(struct vm_area_struct *vmap)
614 rw_wlock(&linux_vma_lock);
615 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
616 rw_wunlock(&linux_vma_lock);
619 static struct vm_area_struct *
620 linux_cdev_handle_find(void *handle)
622 struct vm_area_struct *vmap;
624 rw_rlock(&linux_vma_lock);
625 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
626 if (vmap->vm_private_data == handle)
629 rw_runlock(&linux_vma_lock);
634 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
635 vm_ooffset_t foff, struct ucred *cred, u_short *color)
638 MPASS(linux_cdev_handle_find(handle) != NULL);
644 linux_cdev_pager_dtor(void *handle)
646 const struct vm_operations_struct *vm_ops;
647 struct vm_area_struct *vmap;
649 vmap = linux_cdev_handle_find(handle);
653 * Remove handle before calling close operation to prevent
654 * other threads from reusing the handle pointer.
656 linux_cdev_handle_remove(vmap);
658 down_write(&vmap->vm_mm->mmap_sem);
659 vm_ops = vmap->vm_ops;
660 if (likely(vm_ops != NULL))
662 up_write(&vmap->vm_mm->mmap_sem);
664 linux_cdev_handle_free(vmap);
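/*
 * Two pager personalities are defined below: entry [0] is used for
 * managed device objects (OBJT_MGTDEVICE) and maps pages through the
 * populate callback, while entry [1] is used for unmanaged device
 * objects (OBJT_DEVICE) and uses the fault callback.  The choice
 * between them is made in linux_file_mmap_single() based on whether
 * the VM area provides a fault handler.
 */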
667 static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
670 .cdev_pg_populate = linux_cdev_pager_populate,
671 .cdev_pg_ctor = linux_cdev_pager_ctor,
672 .cdev_pg_dtor = linux_cdev_pager_dtor
676 .cdev_pg_fault = linux_cdev_pager_fault,
677 .cdev_pg_ctor = linux_cdev_pager_ctor,
678 .cdev_pg_dtor = linux_cdev_pager_dtor
683 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
690 if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
692 VM_OBJECT_RLOCK(obj);
693 for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
694 m != NULL && m->pindex < OFF_TO_IDX(address + size);
695 m = TAILQ_NEXT(m, listq))
697 VM_OBJECT_RUNLOCK(obj);
701 static struct file_operations dummy_ldev_ops = {
705 static struct linux_cdev dummy_ldev = {
706 .ops = &dummy_ldev_ops,
#define	LDEV_SI_DTR	0x0001
#define	LDEV_SI_REF	0x0002
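/*
 * The "siref" field of a character device packs the destroy flag
 * LDEV_SI_DTR into the lowest bit and a reference count, in units of
 * LDEV_SI_REF, into the remaining bits.  linux_get_fop() acquires a
 * reference unless the destroy flag is set, linux_drop_fop() releases
 * it again, and linux_destroy_dev() sets the flag and waits for all
 * references to drain before destroying the device.
 */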
713 linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
714 struct linux_cdev **dev)
716 struct linux_cdev *ldev;
722 for (siref = ldev->siref;;) {
723 if ((siref & LDEV_SI_DTR) != 0) {
727 MPASS((ldev->siref & LDEV_SI_DTR) == 0);
728 } else if (atomic_fcmpset_int(&ldev->siref, &siref,
729 siref + LDEV_SI_REF)) {
738 linux_drop_fop(struct linux_cdev *ldev)
743 MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
744 atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
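/*
 * OPW() temporarily points the calling thread's td_fpop at the given
 * struct file while the wrapped Linux file operation runs and then
 * restores the previous value, so that FreeBSD code which consults
 * td_fpop during the call sees the correct file.
 */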
747 #define OPW(fp,td,code) ({ \
748 struct file *__fpop; \
749 __typeof(code) __retval; \
751 __fpop = (td)->td_fpop; \
752 (td)->td_fpop = (fp); \
754 (td)->td_fpop = __fpop; \
759 linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
762 struct linux_cdev *ldev;
763 struct linux_file *filp;
764 const struct file_operations *fop;
769 filp = linux_file_alloc();
770 filp->f_dentry = &filp->f_dentry_store;
771 filp->f_op = ldev->ops;
772 filp->f_mode = file->f_flag;
773 filp->f_flags = file->f_flag;
774 filp->f_vnode = file->f_vnode;
776 refcount_acquire(&ldev->refs);
779 linux_set_current(td);
780 linux_get_fop(filp, &fop, &ldev);
782 if (fop->open != NULL) {
783 error = -fop->open(file->f_vnode, filp);
785 linux_drop_fop(ldev);
786 linux_cdev_deref(filp->f_cdev);
792 /* hold on to the vnode - used for fstat() */
793 vhold(filp->f_vnode);
795 /* release the file from devfs */
796 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
797 linux_drop_fop(ldev);
#define	LINUX_IOCTL_MIN_PTR 0x10000UL
#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
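/*
 * IOCTL argument pointers falling inside the window between
 * LINUX_IOCTL_MIN_PTR and LINUX_IOCTL_MAX_PTR are not real user-space
 * addresses.  linux_file_ioctl_sub() hands such a cookie to the Linux
 * ioctl handler and records the real kernel buffer in the current
 * task; linux_remap_address() below translates the cookie back into
 * that kernel buffer when linux_copyin() or linux_copyout() is used.
 */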
805 linux_remap_address(void **uaddr, size_t len)
807 uintptr_t uaddr_val = (uintptr_t)(*uaddr);
809 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
810 uaddr_val < LINUX_IOCTL_MAX_PTR)) {
811 struct task_struct *pts = current;
817 /* compute data offset */
818 uaddr_val -= LINUX_IOCTL_MIN_PTR;
820 /* check that length is within bounds */
821 if ((len > IOCPARM_MAX) ||
822 (uaddr_val + len) > pts->bsd_ioctl_len) {
827 /* re-add kernel buffer address */
828 uaddr_val += (uintptr_t)pts->bsd_ioctl_data;
830 /* update address location */
831 *uaddr = (void *)uaddr_val;
838 linux_copyin(const void *uaddr, void *kaddr, size_t len)
840 if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
843 memcpy(kaddr, uaddr, len);
846 return (-copyin(uaddr, kaddr, len));
850 linux_copyout(const void *kaddr, void *uaddr, size_t len)
852 if (linux_remap_address(&uaddr, len)) {
855 memcpy(uaddr, kaddr, len);
858 return (-copyout(kaddr, uaddr, len));
862 linux_clear_user(void *_uaddr, size_t _len)
864 uint8_t *uaddr = _uaddr;
867 /* make sure uaddr is aligned before going into the fast loop */
868 while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
869 if (subyte(uaddr, 0))
875 /* zero 8 bytes at a time */
878 if (suword64(uaddr, 0))
881 if (suword32(uaddr, 0))
883 if (suword32(uaddr + 4, 0))
890 /* zero fill end, if any */
892 if (subyte(uaddr, 0))
901 linux_access_ok(const void *uaddr, size_t len)
906 /* get start and end address */
907 saddr = (uintptr_t)uaddr;
908 eaddr = (uintptr_t)uaddr + len;
910 /* verify addresses are valid for userspace */
911 return ((saddr == eaddr) ||
912 (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
916 * This function should return either EINTR or ERESTART depending on
917 * the signal type sent to this thread:
920 linux_get_error(struct task_struct *task, int error)
922 /* check for signal type interrupt code */
923 if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
924 error = -linux_schedule_get_interrupt_value(task);
932 linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
933 const struct file_operations *fop, u_long cmd, caddr_t data,
936 struct task_struct *task = current;
940 size = IOCPARM_LEN(cmd);
941 /* refer to logic in sys_ioctl() */
944 * Setup hint for linux_copyin() and linux_copyout().
946 * Background: Linux code expects a user-space address
947 * while FreeBSD supplies a kernel-space address.
949 task->bsd_ioctl_data = data;
950 task->bsd_ioctl_len = size;
951 data = (void *)LINUX_IOCTL_MIN_PTR;
953 /* fetch user-space pointer */
954 data = *(void **)data;
956 #if defined(__amd64__)
957 if (td->td_proc->p_elf_machine == EM_386) {
958 /* try the compat IOCTL handler first */
959 if (fop->compat_ioctl != NULL) {
960 error = -OPW(fp, td, fop->compat_ioctl(filp,
966 /* fallback to the regular IOCTL handler, if any */
967 if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
968 error = -OPW(fp, td, fop->unlocked_ioctl(filp,
974 if (fop->unlocked_ioctl != NULL) {
975 error = -OPW(fp, td, fop->unlocked_ioctl(filp,
982 task->bsd_ioctl_data = NULL;
983 task->bsd_ioctl_len = 0;
986 if (error == EWOULDBLOCK) {
987 /* update kqfilter status, if any */
988 linux_file_kqfilter_poll(filp,
989 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
991 error = linux_get_error(task, error);
#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)
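/*
 * LINUX_POLL_TABLE_NORMAL is a sentinel poll_table pointer passed to
 * the poll method when it is invoked from the FreeBSD select/poll
 * path; linux_poll_wait() checks for it before calling selrecord().
 */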
999 * This function atomically updates the poll wakeup state and returns
1000 * the previous state at the time of update.
1003 linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
1009 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
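/*
 * The wakeup callback and the poll wait/dequeue functions below each
 * pass their own transition table to linux_poll_wakeup_state().  The
 * table maps the current LINUX_FWQ_STATE_* value to the next state,
 * and the returned previous state decides which action, if any, is
 * taken.
 */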
1017 linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
1019 static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1020 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
1021 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
1022 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
1023 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
1025 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);
1027 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1028 case LINUX_FWQ_STATE_QUEUED:
1029 linux_poll_wakeup(filp);
1037 linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
1039 static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1040 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
1041 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
1042 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
1043 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
1046 /* check if we are called inside the select system call */
1047 if (p == LINUX_POLL_TABLE_NORMAL)
1048 selrecord(curthread, &filp->f_selinfo);
1050 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1051 case LINUX_FWQ_STATE_INIT:
1052 /* NOTE: file handles can only belong to one wait-queue */
1053 filp->f_wait_queue.wqh = wqh;
1054 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
1055 add_wait_queue(wqh, &filp->f_wait_queue.wq);
1056 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
1064 linux_poll_wait_dequeue(struct linux_file *filp)
1066 static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1067 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
1068 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
1069 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
1070 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
1073 seldrain(&filp->f_selinfo);
1075 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1076 case LINUX_FWQ_STATE_NOT_READY:
1077 case LINUX_FWQ_STATE_QUEUED:
1078 case LINUX_FWQ_STATE_READY:
1079 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
1087 linux_poll_wakeup(struct linux_file *filp)
1089 /* this function should be NULL-safe */
1093 selwakeup(&filp->f_selinfo);
1095 spin_lock(&filp->f_kqlock);
1096 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
1097 LINUX_KQ_FLAG_NEED_WRITE;
1099 /* make sure the "knote" gets woken up */
1100 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
1101 spin_unlock(&filp->f_kqlock);
1105 linux_file_kqfilter_detach(struct knote *kn)
1107 struct linux_file *filp = kn->kn_hook;
1109 spin_lock(&filp->f_kqlock);
1110 knlist_remove(&filp->f_selinfo.si_note, kn, 1);
1111 spin_unlock(&filp->f_kqlock);
1115 linux_file_kqfilter_read_event(struct knote *kn, long hint)
1117 struct linux_file *filp = kn->kn_hook;
1119 mtx_assert(&filp->f_kqlock.m, MA_OWNED);
1121 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
1125 linux_file_kqfilter_write_event(struct knote *kn, long hint)
1127 struct linux_file *filp = kn->kn_hook;
1129 mtx_assert(&filp->f_kqlock.m, MA_OWNED);
1131 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
1134 static struct filterops linux_dev_kqfiltops_read = {
1136 .f_detach = linux_file_kqfilter_detach,
1137 .f_event = linux_file_kqfilter_read_event,
1140 static struct filterops linux_dev_kqfiltops_write = {
1142 .f_detach = linux_file_kqfilter_detach,
1143 .f_event = linux_file_kqfilter_write_event,
1147 linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
1150 const struct file_operations *fop;
1151 struct linux_cdev *ldev;
1154 if ((filp->f_kqflags & kqflags) == 0)
1159 linux_get_fop(filp, &fop, &ldev);
1160 /* get the latest polling state */
1161 temp = OPW(filp->_file, td, fop->poll(filp, NULL));
1162 linux_drop_fop(ldev);
1164 spin_lock(&filp->f_kqlock);
1166 filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
1167 LINUX_KQ_FLAG_NEED_WRITE);
1168 /* update kqflags */
1169 if ((temp & (POLLIN | POLLOUT)) != 0) {
1170 if ((temp & POLLIN) != 0)
1171 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
1172 if ((temp & POLLOUT) != 0)
1173 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;
1175 /* make sure the "knote" gets woken up */
1176 KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
1178 spin_unlock(&filp->f_kqlock);
1182 linux_file_kqfilter(struct file *file, struct knote *kn)
1184 struct linux_file *filp;
1189 filp = (struct linux_file *)file->f_data;
1190 filp->f_flags = file->f_flag;
1191 if (filp->f_op->poll == NULL)
1194 spin_lock(&filp->f_kqlock);
1195 switch (kn->kn_filter) {
1197 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
1198 kn->kn_fop = &linux_dev_kqfiltops_read;
1200 knlist_add(&filp->f_selinfo.si_note, kn, 1);
1204 filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
1205 kn->kn_fop = &linux_dev_kqfiltops_write;
1207 knlist_add(&filp->f_selinfo.si_note, kn, 1);
1214 spin_unlock(&filp->f_kqlock);
1217 linux_set_current(td);
1219 /* update kqfilter status, if any */
1220 linux_file_kqfilter_poll(filp,
1221 LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
1227 linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
1228 vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
1229 int nprot, struct thread *td)
1231 struct task_struct *task;
1232 struct vm_area_struct *vmap;
1233 struct mm_struct *mm;
1234 struct linux_file *filp;
1238 filp = (struct linux_file *)fp->f_data;
1239 filp->f_flags = fp->f_flag;
1241 if (fop->mmap == NULL)
1242 return (EOPNOTSUPP);
1244 linux_set_current(td);
1247 * The same VM object might be shared by multiple processes
1248 * and the mm_struct is usually freed when a process exits.
1250 * The atomic reference below makes sure the mm_struct is
1251 * available as long as the vmap is in the linux_vma_head.
1255 if (atomic_inc_not_zero(&mm->mm_users) == 0)
1258 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
1260 vmap->vm_end = size;
1261 vmap->vm_pgoff = *offset / PAGE_SIZE;
1263 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
1264 vmap->vm_ops = NULL;
1265 vmap->vm_file = get_file(filp);
1268 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
1269 error = linux_get_error(task, EINTR);
1271 error = -OPW(fp, td, fop->mmap(filp, vmap));
1272 error = linux_get_error(task, error);
1273 up_write(&vmap->vm_mm->mmap_sem);
1277 linux_cdev_handle_free(vmap);
1281 attr = pgprot2cachemode(vmap->vm_page_prot);
1283 if (vmap->vm_ops != NULL) {
1284 struct vm_area_struct *ptr;
1285 void *vm_private_data;
1288 if (vmap->vm_ops->open == NULL ||
1289 vmap->vm_ops->close == NULL ||
1290 vmap->vm_private_data == NULL) {
1291 /* free allocated VM area struct */
1292 linux_cdev_handle_free(vmap);
1296 vm_private_data = vmap->vm_private_data;
1298 rw_wlock(&linux_vma_lock);
1299 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
1300 if (ptr->vm_private_data == vm_private_data)
1303 /* check if there is an existing VM area struct */
1305 /* check if the VM area structure is invalid */
1306 if (ptr->vm_ops == NULL ||
1307 ptr->vm_ops->open == NULL ||
1308 ptr->vm_ops->close == NULL) {
1313 vm_no_fault = (ptr->vm_ops->fault == NULL);
1316 /* insert VM area structure into list */
1317 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
1319 vm_no_fault = (vmap->vm_ops->fault == NULL);
1321 rw_wunlock(&linux_vma_lock);
1324 /* free allocated VM area struct */
1325 linux_cdev_handle_free(vmap);
1326 /* check for stale VM area struct */
1327 if (error != EEXIST)
1331 /* check if there is no fault handler */
1333 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
1334 &linux_cdev_pager_ops[1], size, nprot, *offset,
1337 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
1338 &linux_cdev_pager_ops[0], size, nprot, *offset,
1342 /* check if allocating the VM object failed */
1343 if (*object == NULL) {
1345 /* remove VM area struct from list */
1346 linux_cdev_handle_remove(vmap);
1347 /* free allocated VM area struct */
1348 linux_cdev_handle_free(vmap);
1355 sg = sglist_alloc(1, M_WAITOK);
1356 sglist_append_phys(sg,
1357 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);
1359 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
1360 nprot, 0, td->td_ucred);
1362 linux_cdev_handle_free(vmap);
1364 if (*object == NULL) {
1370 if (attr != VM_MEMATTR_DEFAULT) {
1371 VM_OBJECT_WLOCK(*object);
1372 vm_object_set_memattr(*object, attr);
1373 VM_OBJECT_WUNLOCK(*object);
1379 struct cdevsw linuxcdevsw = {
1380 .d_version = D_VERSION,
1381 .d_fdopen = linux_dev_fdopen,
1382 .d_name = "lkpidev",
1386 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
1387 int flags, struct thread *td)
1389 struct linux_file *filp;
1390 const struct file_operations *fop;
1391 struct linux_cdev *ldev;
1396 filp = (struct linux_file *)file->f_data;
1397 filp->f_flags = file->f_flag;
1398 /* XXX no support for I/O vectors currently */
1399 if (uio->uio_iovcnt != 1)
1400 return (EOPNOTSUPP);
1401 if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1403 linux_set_current(td);
1404 linux_get_fop(filp, &fop, &ldev);
1405 if (fop->read != NULL) {
1406 bytes = OPW(file, td, fop->read(filp,
1407 uio->uio_iov->iov_base,
1408 uio->uio_iov->iov_len, &uio->uio_offset));
1410 uio->uio_iov->iov_base =
1411 ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1412 uio->uio_iov->iov_len -= bytes;
1413 uio->uio_resid -= bytes;
1415 error = linux_get_error(current, -bytes);
1420 /* update kqfilter status, if any */
1421 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
1422 linux_drop_fop(ldev);
1428 linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
1429 int flags, struct thread *td)
1431 struct linux_file *filp;
1432 const struct file_operations *fop;
1433 struct linux_cdev *ldev;
1437 filp = (struct linux_file *)file->f_data;
1438 filp->f_flags = file->f_flag;
1439 /* XXX no support for I/O vectors currently */
1440 if (uio->uio_iovcnt != 1)
1441 return (EOPNOTSUPP);
1442 if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1444 linux_set_current(td);
1445 linux_get_fop(filp, &fop, &ldev);
1446 if (fop->write != NULL) {
1447 bytes = OPW(file, td, fop->write(filp,
1448 uio->uio_iov->iov_base,
1449 uio->uio_iov->iov_len, &uio->uio_offset));
1451 uio->uio_iov->iov_base =
1452 ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1453 uio->uio_iov->iov_len -= bytes;
1454 uio->uio_resid -= bytes;
1457 error = linux_get_error(current, -bytes);
1462 /* update kqfilter status, if any */
1463 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);
1465 linux_drop_fop(ldev);
1471 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
1474 struct linux_file *filp;
1475 const struct file_operations *fop;
1476 struct linux_cdev *ldev;
1479 filp = (struct linux_file *)file->f_data;
1480 filp->f_flags = file->f_flag;
1481 linux_set_current(td);
1482 linux_get_fop(filp, &fop, &ldev);
1483 if (fop->poll != NULL) {
1484 revents = OPW(file, td, fop->poll(filp,
1485 LINUX_POLL_TABLE_NORMAL)) & events;
1489 linux_drop_fop(ldev);
1494 linux_file_close(struct file *file, struct thread *td)
1496 struct linux_file *filp;
1497 const struct file_operations *fop;
1498 struct linux_cdev *ldev;
1501 filp = (struct linux_file *)file->f_data;
1503 KASSERT(file_count(filp) == 0,
1504 ("File refcount(%d) is not zero", file_count(filp)));
1507 filp->f_flags = file->f_flag;
1508 linux_set_current(td);
1509 linux_poll_wait_dequeue(filp);
1510 linux_get_fop(filp, &fop, &ldev);
1511 if (fop->release != NULL)
1512 error = -OPW(file, td, fop->release(filp->f_vnode, filp));
1513 funsetown(&filp->f_sigio);
1514 if (filp->f_vnode != NULL)
1515 vdrop(filp->f_vnode);
1516 linux_drop_fop(ldev);
1517 if (filp->f_cdev != NULL)
1518 linux_cdev_deref(filp->f_cdev);
1525 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
1528 struct linux_file *filp;
1529 const struct file_operations *fop;
1530 struct linux_cdev *ldev;
1534 filp = (struct linux_file *)fp->f_data;
1535 filp->f_flags = fp->f_flag;
1536 linux_get_fop(filp, &fop, &ldev);
1538 linux_set_current(td);
1543 if (fop->fasync == NULL)
1545 error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
1548 error = fsetown(*(int *)data, &filp->f_sigio);
1550 if (fop->fasync == NULL)
1552 error = -OPW(fp, td, fop->fasync(0, filp,
1553 fp->f_flag & FASYNC));
1557 *(int *)data = fgetown(&filp->f_sigio);
1560 error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
1563 linux_drop_fop(ldev);
1568 linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1569 vm_prot_t *maxprotp, int *flagsp, struct file *fp,
1570 vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
1573 * Character devices do not provide private mappings
1576 if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1577 (prot & VM_PROT_WRITE) != 0)
1579 if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
1582 return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
1587 linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
1588 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
1591 struct linux_file *filp;
1592 const struct file_operations *fop;
1593 struct linux_cdev *ldev;
1600 filp = (struct linux_file *)fp->f_data;
1604 return (EOPNOTSUPP);
1607 * Ensure that file and memory protections are
1611 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
1612 maxprot = VM_PROT_NONE;
1613 if ((prot & VM_PROT_EXECUTE) != 0)
1616 maxprot = VM_PROT_EXECUTE;
1617 if ((fp->f_flag & FREAD) != 0)
1618 maxprot |= VM_PROT_READ;
1619 else if ((prot & VM_PROT_READ) != 0)
 * If we are sharing potential changes via MAP_SHARED and we are
 * trying to get write permission even though we opened the file
 * without asking for it, bail out.
 * Note that most character devices always share mappings.
 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
 * requests rather than doing it here.
1632 if ((flags & MAP_SHARED) != 0) {
1633 if ((fp->f_flag & FWRITE) != 0)
1634 maxprot |= VM_PROT_WRITE;
1635 else if ((prot & VM_PROT_WRITE) != 0)
1638 maxprot &= cap_maxprot;
1640 linux_get_fop(filp, &fop, &ldev);
1641 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp,
1642 &foff, fop, &object);
1646 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1649 vm_object_deallocate(object);
1651 linux_drop_fop(ldev);
1656 linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
1659 struct linux_file *filp;
1663 filp = (struct linux_file *)fp->f_data;
1664 if (filp->f_vnode == NULL)
1665 return (EOPNOTSUPP);
1669 vn_lock(vp, LK_SHARED | LK_RETRY);
1670 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td);
1677 linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1678 struct filedesc *fdp)
1680 struct linux_file *filp;
1688 kif->kf_type = KF_TYPE_DEV;
1691 FILEDESC_SUNLOCK(fdp);
1692 error = vn_fill_kinfo_vnode(vp, kif);
1694 kif->kf_type = KF_TYPE_VNODE;
1695 FILEDESC_SLOCK(fdp);
1701 linux_iminor(struct inode *inode)
1703 struct linux_cdev *ldev;
1705 if (inode == NULL || inode->v_rdev == NULL ||
1706 inode->v_rdev->si_devsw != &linuxcdevsw)
1708 ldev = inode->v_rdev->si_drv1;
1712 return (minor(ldev->dev));
1715 struct fileops linuxfileops = {
1716 .fo_read = linux_file_read,
1717 .fo_write = linux_file_write,
1718 .fo_truncate = invfo_truncate,
1719 .fo_kqfilter = linux_file_kqfilter,
1720 .fo_stat = linux_file_stat,
1721 .fo_fill_kinfo = linux_file_fill_kinfo,
1722 .fo_poll = linux_file_poll,
1723 .fo_close = linux_file_close,
1724 .fo_ioctl = linux_file_ioctl,
1725 .fo_mmap = linux_file_mmap,
1726 .fo_chmod = invfo_chmod,
1727 .fo_chown = invfo_chown,
1728 .fo_sendfile = invfo_sendfile,
1729 .fo_flags = DFLAG_PASSABLE,
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  It exists because we must store the
 * caller's idea of the map size in order to unmap properly.
1738 LIST_ENTRY(vmmap) vm_next;
1740 unsigned long vm_size;
1744 struct vmmap *lh_first;
1746 #define VMMAP_HASH_SIZE 64
1747 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
1748 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
1749 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
1750 static struct mtx vmmaplock;
1753 vmmap_add(void *addr, unsigned long size)
1755 struct vmmap *vmmap;
1757 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
1758 mtx_lock(&vmmaplock);
1759 vmmap->vm_size = size;
1760 vmmap->vm_addr = addr;
1761 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
1762 mtx_unlock(&vmmaplock);
1765 static struct vmmap *
1766 vmmap_remove(void *addr)
1768 struct vmmap *vmmap;
1770 mtx_lock(&vmmaplock);
1771 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
1772 if (vmmap->vm_addr == addr)
1775 LIST_REMOVE(vmmap, vm_next);
1776 mtx_unlock(&vmmaplock);
1781 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
1783 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
1787 addr = pmap_mapdev_attr(phys_addr, size, attr);
1790 vmmap_add(addr, size);
1799 struct vmmap *vmmap;
1801 vmmap = vmmap_remove(addr);
1804 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
1805 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
1812 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
1817 size = count * PAGE_SIZE;
1818 off = kva_alloc(size);
1821 vmmap_add((void *)off, size);
1822 pmap_qenter(off, pages, count);
1824 return ((void *)off);
1830 struct vmmap *vmmap;
1832 vmmap = vmmap_remove(addr);
1835 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
1836 kva_free((vm_offset_t)addr, vmmap->vm_size);
1841 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
1848 len = vsnprintf(NULL, 0, fmt, aq);
1851 p = kmalloc(len + 1, gfp);
1853 vsnprintf(p, len + 1, fmt, ap);
1859 kasprintf(gfp_t gfp, const char *fmt, ...)
1865 p = kvasprintf(gfp, fmt, ap);
1872 linux_timer_callback_wrapper(void *context)
1874 struct timer_list *timer;
1876 linux_set_current(curthread);
1879 timer->function(timer->data);
1883 mod_timer(struct timer_list *timer, int expires)
1886 timer->expires = expires;
1887 callout_reset(&timer->callout,
1888 linux_timer_jiffies_until(expires),
1889 &linux_timer_callback_wrapper, timer);
1893 add_timer(struct timer_list *timer)
1896 callout_reset(&timer->callout,
1897 linux_timer_jiffies_until(timer->expires),
1898 &linux_timer_callback_wrapper, timer);
1902 add_timer_on(struct timer_list *timer, int cpu)
1905 callout_reset_on(&timer->callout,
1906 linux_timer_jiffies_until(timer->expires),
1907 &linux_timer_callback_wrapper, timer, cpu);
1911 del_timer(struct timer_list *timer)
1914 if (callout_stop(&(timer)->callout) == -1)
1920 linux_timer_init(void *arg)
1924 * Compute an internal HZ value which can divide 2**32 to
1925 * avoid timer rounding problems when the tick value wraps
1928 linux_timer_hz_mask = 1;
1929 while (linux_timer_hz_mask < (unsigned long)hz)
1930 linux_timer_hz_mask *= 2;
1931 linux_timer_hz_mask--;
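/*
 * For example, with hz = 1000 the loop above stops at 1024 and the
 * resulting mask is 1023, i.e. 2**10 - 1.
 */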
1933 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
1936 linux_complete_common(struct completion *c, int all)
1943 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
1945 if (c->done != UINT_MAX)
1947 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
1955 * Indefinite wait for done != 0 with or without signals.
1958 linux_wait_for_common(struct completion *c, int flags)
1960 struct task_struct *task;
1963 if (SCHEDULER_STOPPED())
1969 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
1971 flags = SLEEPQ_SLEEP;
1977 sleepq_add(c, NULL, "completion", flags, 0);
1978 if (flags & SLEEPQ_INTERRUPTIBLE) {
1980 error = -sleepq_wait_sig(c, 0);
1983 linux_schedule_save_interrupt_value(task, error);
1984 error = -ERESTARTSYS;
1993 if (c->done != UINT_MAX)
2002 * Time limited wait for done != 0 with or without signals.
2005 linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
2007 struct task_struct *task;
2008 int end = jiffies + timeout;
2011 if (SCHEDULER_STOPPED())
2017 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
2019 flags = SLEEPQ_SLEEP;
2025 sleepq_add(c, NULL, "completion", flags, 0);
2026 sleepq_set_timeout(c, linux_timer_jiffies_until(end));
2029 if (flags & SLEEPQ_INTERRUPTIBLE)
2030 error = -sleepq_timedwait_sig(c, 0);
2032 error = -sleepq_timedwait(c, 0);
2036 /* check for timeout */
2037 if (error == -EWOULDBLOCK) {
2038 error = 0; /* timeout */
2040 /* signal happened */
2041 linux_schedule_save_interrupt_value(task, error);
2042 error = -ERESTARTSYS;
2047 if (c->done != UINT_MAX)
2051 /* return how many jiffies are left */
2052 error = linux_timer_jiffies_until(end);
2058 linux_try_wait_for_completion(struct completion *c)
2063 isdone = (c->done != 0);
2064 if (c->done != 0 && c->done != UINT_MAX)
2071 linux_completion_done(struct completion *c)
2076 isdone = (c->done != 0);
2082 linux_cdev_deref(struct linux_cdev *ldev)
2085 if (refcount_release(&ldev->refs))
2090 linux_cdev_release(struct kobject *kobj)
2092 struct linux_cdev *cdev;
2093 struct kobject *parent;
2095 cdev = container_of(kobj, struct linux_cdev, kobj);
2096 parent = kobj->parent;
2097 linux_destroy_dev(cdev);
2098 linux_cdev_deref(cdev);
2099 kobject_put(parent);
2103 linux_cdev_static_release(struct kobject *kobj)
2105 struct linux_cdev *cdev;
2106 struct kobject *parent;
2108 cdev = container_of(kobj, struct linux_cdev, kobj);
2109 parent = kobj->parent;
2110 linux_destroy_dev(cdev);
2111 kobject_put(parent);
2115 linux_destroy_dev(struct linux_cdev *ldev)
2118 if (ldev->cdev == NULL)
2121 MPASS((ldev->siref & LDEV_SI_DTR) == 0);
2122 atomic_set_int(&ldev->siref, LDEV_SI_DTR);
2123 while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
2124 pause("ldevdtr", hz / 4);
2126 destroy_dev(ldev->cdev);
2130 const struct kobj_type linux_cdev_ktype = {
2131 .release = linux_cdev_release,
2134 const struct kobj_type linux_cdev_static_ktype = {
2135 .release = linux_cdev_static_release,
2139 linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
2141 struct notifier_block *nb;
2144 if (linkstate == LINK_STATE_UP)
2145 nb->notifier_call(nb, NETDEV_UP, ifp);
2147 nb->notifier_call(nb, NETDEV_DOWN, ifp);
2151 linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
2153 struct notifier_block *nb;
2156 nb->notifier_call(nb, NETDEV_REGISTER, ifp);
2160 linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
2162 struct notifier_block *nb;
2165 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
2169 linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
2171 struct notifier_block *nb;
2174 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
2178 linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
2180 struct notifier_block *nb;
2183 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
2187 register_netdevice_notifier(struct notifier_block *nb)
2190 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
2191 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
2192 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
2193 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
2194 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
2195 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
2196 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
2197 iflladdr_event, linux_handle_iflladdr_event, nb, 0);
2203 register_inetaddr_notifier(struct notifier_block *nb)
2206 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
2207 ifaddr_event, linux_handle_ifaddr_event, nb, 0);
2212 unregister_netdevice_notifier(struct notifier_block *nb)
2215 EVENTHANDLER_DEREGISTER(ifnet_link_event,
2216 nb->tags[NETDEV_UP]);
2217 EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
2218 nb->tags[NETDEV_REGISTER]);
2219 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
2220 nb->tags[NETDEV_UNREGISTER]);
2221 EVENTHANDLER_DEREGISTER(iflladdr_event,
2222 nb->tags[NETDEV_CHANGEADDR]);
2228 unregister_inetaddr_notifier(struct notifier_block *nb)
2231 EVENTHANDLER_DEREGISTER(ifaddr_event,
2232 nb->tags[NETDEV_CHANGEIFADDR]);
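/*
 * list_sort() below sorts a Linux linked list by copying the element
 * pointers into a temporary array, sorting that array with qsort_r()
 * through the list_sort_thunk adapter (which carries the caller's
 * compare function and private argument), and then rebuilding the
 * list in sorted order.
 */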
2237 struct list_sort_thunk {
2238 int (*cmp)(void *, struct list_head *, struct list_head *);
2243 linux_le_cmp(void *priv, const void *d1, const void *d2)
2245 struct list_head *le1, *le2;
2246 struct list_sort_thunk *thunk;
2249 le1 = *(__DECONST(struct list_head **, d1));
2250 le2 = *(__DECONST(struct list_head **, d2));
2251 return ((thunk->cmp)(thunk->priv, le1, le2));
2255 list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
2256 struct list_head *a, struct list_head *b))
2258 struct list_sort_thunk thunk;
2259 struct list_head **ar, *le;
2263 list_for_each(le, head)
2265 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
2267 list_for_each(le, head)
2271 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
2272 INIT_LIST_HEAD(head);
2273 for (i = 0; i < count; i++)
2274 list_add_tail(ar[i], head);
2275 free(ar, M_KMALLOC);
2279 linux_irq_handler(void *ent)
2281 struct irq_ent *irqe;
2283 linux_set_current(curthread);
2286 irqe->handler(irqe->irq, irqe->arg);
2289 #if defined(__i386__) || defined(__amd64__)
2291 linux_wbinvd_on_all_cpus(void)
2294 pmap_invalidate_cache();
2300 linux_on_each_cpu(void callback(void *), void *data)
2303 smp_rendezvous(smp_no_rendezvous_barrier, callback,
2304 smp_no_rendezvous_barrier, data);
2309 linux_in_atomic(void)
2312 return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
2316 linux_find_cdev(const char *name, unsigned major, unsigned minor)
2318 dev_t dev = MKDEV(major, minor);
2322 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
2323 struct linux_cdev *ldev = cdev->si_drv1;
2324 if (ldev->dev == dev &&
2325 strcmp(kobject_name(&ldev->kobj), name) == 0) {
2331 return (cdev != NULL ? cdev->si_drv1 : NULL);
2335 __register_chrdev(unsigned int major, unsigned int baseminor,
2336 unsigned int count, const char *name,
2337 const struct file_operations *fops)
2339 struct linux_cdev *cdev;
2343 for (i = baseminor; i < baseminor + count; i++) {
2344 cdev = cdev_alloc();
2346 kobject_set_name(&cdev->kobj, name);
2348 ret = cdev_add(cdev, makedev(major, i), 1);
2356 __register_chrdev_p(unsigned int major, unsigned int baseminor,
2357 unsigned int count, const char *name,
2358 const struct file_operations *fops, uid_t uid,
2359 gid_t gid, int mode)
2361 struct linux_cdev *cdev;
2365 for (i = baseminor; i < baseminor + count; i++) {
2366 cdev = cdev_alloc();
2368 kobject_set_name(&cdev->kobj, name);
2370 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
2378 __unregister_chrdev(unsigned int major, unsigned int baseminor,
2379 unsigned int count, const char *name)
2381 struct linux_cdev *cdevp;
2384 for (i = baseminor; i < baseminor + count; i++) {
2385 cdevp = linux_find_cdev(name, major, i);
2392 linux_dump_stack(void)
2403 #if defined(__i386__) || defined(__amd64__)
2404 bool linux_cpu_has_clflush;
2408 linux_compat_init(void *arg)
2410 struct sysctl_oid *rootoid;
2413 #if defined(__i386__) || defined(__amd64__)
2414 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
2416 rw_init(&linux_vma_lock, "lkpi-vma-lock");
2418 rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
2419 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
2420 kobject_init(&linux_class_root, &linux_class_ktype);
2421 kobject_set_name(&linux_class_root, "class");
2422 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
2423 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
2424 kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
2425 kobject_set_name(&linux_root_device.kobj, "device");
2426 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
2427 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
2429 linux_root_device.bsddev = root_bus;
2430 linux_class_misc.name = "misc";
2431 class_register(&linux_class_misc);
2432 INIT_LIST_HEAD(&pci_drivers);
2433 INIT_LIST_HEAD(&pci_devices);
2434 spin_lock_init(&pci_lock);
2435 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
2436 for (i = 0; i < VMMAP_HASH_SIZE; i++)
2437 LIST_INIT(&vmmaphead[i]);
2439 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
2442 linux_compat_uninit(void *arg)
2444 linux_kobject_kfree_name(&linux_class_root);
2445 linux_kobject_kfree_name(&linux_root_device.kobj);
2446 linux_kobject_kfree_name(&linux_class_misc.kobj);
2448 mtx_destroy(&vmmaplock);
2449 spin_lock_destroy(&pci_lock);
2450 rw_destroy(&linux_vma_lock);
2452 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used.  Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
2460 CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));