/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>
#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif
SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
#include <linux/rbtree.h>
/* Undo Linux compat changes. */
#undef RB_ROOT
#undef file
#undef cdev
#define RB_ROOT(head)   (head)->rbh_root
static struct vm_area_struct *linux_cdev_handle_find(void *handle);
struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;
int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
    panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
    va_list tmp_va;
    int len;
    char *old;
    char *name;
    char dummy;

    old = kobj->name;

    if (old && fmt == NULL)
        return (0);

    /* compute length of string */
    va_copy(tmp_va, args);
    len = vsnprintf(&dummy, 0, fmt, tmp_va);
    va_end(tmp_va);

    /* account for zero termination */
    len++;

    /* check for error */
    if (len < 1)
        return (-EINVAL);

    /* allocate memory for string */
    name = kzalloc(len, GFP_KERNEL);
    if (name == NULL)
        return (-ENOMEM);
    vsnprintf(name, len, fmt, args);
    kobj->name = name;

    /* free old string */
    kfree(old);

    /* filter new string */
    for (; *name != '\0'; name++)
        if (*name == '/')
            *name = '!';
    return (0);
}
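
/*
 * Usage sketch (illustrative only; the device name below is
 * hypothetical): names are built printf-style and any '/' is rewritten
 * to '!' so the result stays a single sysctl/sysfs path component:
 *
 *	kobject_set_name(&kobj, "ttyUSB%d/%s", 0, "cfg");
 *
 * would store the name "ttyUSB0!cfg".
 */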
int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
    va_list args;
    int error;

    va_start(args, fmt);
    error = kobject_set_name_vargs(kobj, fmt, args);
    va_end(args);

    return (error);
}
static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
    const struct kobj_type *t;
    int error;

    kobj->parent = parent;
    error = sysfs_create_dir(kobj);
    if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
        struct attribute **attr;
        t = kobj->ktype;

        for (attr = t->default_attrs; *attr != NULL; attr++) {
            error = sysfs_create_file(kobj, *attr);
            if (error)
                break;
        }
        if (error)
            sysfs_remove_dir(kobj);
    }
    return (error);
}
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
    va_list args;
    int error;

    va_start(args, fmt);
    error = kobject_set_name_vargs(kobj, fmt, args);
    va_end(args);
    if (error)
        return (error);

    return kobject_add_complete(kobj, parent);
}
void
linux_kobject_release(struct kref *kref)
{
    struct kobject *kobj;
    char *name;

    kobj = container_of(kref, struct kobject, kref);
    sysfs_remove_dir(kobj);
    name = kobj->name;
    if (kobj->ktype && kobj->ktype->release)
        kobj->ktype->release(kobj);
    kfree(name);
}
static void
linux_kobject_kfree(struct kobject *kobj)
{
    kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
    if (kobj) {
        kfree(kobj->name);
    }
}

const struct kobj_type linux_kfree_type = {
    .release = linux_kobject_kfree
};
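
/*
 * Release-flow sketch (illustrative): a kobject allocated with
 * kzalloc() and initialized with linux_kfree_type is torn down through
 * its kref, so the final kobject_put() ends up in
 * linux_kobject_release() above, which removes the sysfs directory,
 * runs the ktype release method and frees the name string, e.g.:
 *
 *	kobject_init(kobj, &linux_kfree_type);
 *	...
 *	kobject_put(kobj);	// last put calls linux_kobject_kfree()
 */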
static void
linux_device_release(struct device *dev)
{
    pr_debug("linux_device_release: %s\n", dev_name(dev));
    kfree(dev);
}
static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
    struct class_attribute *dattr;
    ssize_t error;

    dattr = container_of(attr, struct class_attribute, attr);
    error = -EIO;
    if (dattr->show)
        error = dattr->show(container_of(kobj, struct class, kobj),
            dattr, buf);
    return (error);
}
static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
    struct class_attribute *dattr;
    ssize_t error;

    dattr = container_of(attr, struct class_attribute, attr);
    error = -EIO;
    if (dattr->store)
        error = dattr->store(container_of(kobj, struct class, kobj),
            dattr, buf, count);
    return (error);
}
static void
linux_class_release(struct kobject *kobj)
{
    struct class *class;

    class = container_of(kobj, struct class, kobj);
    if (class->class_release)
        class->class_release(class);
}
static const struct sysfs_ops linux_class_sysfs = {
    .show  = linux_class_show,
    .store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
    .release = linux_class_release,
    .sysfs_ops = &linux_class_sysfs
};
static void
linux_dev_release(struct kobject *kobj)
{
    struct device *dev;

    dev = container_of(kobj, struct device, kobj);
    /* This is the precedence defined by linux. */
    if (dev->release)
        dev->release(dev);
    else if (dev->class && dev->class->dev_release)
        dev->class->dev_release(dev);
}
static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
    struct device_attribute *dattr;
    ssize_t error;

    dattr = container_of(attr, struct device_attribute, attr);
    error = -EIO;
    if (dattr->show)
        error = dattr->show(container_of(kobj, struct device, kobj),
            dattr, buf);
    return (error);
}
static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
    struct device_attribute *dattr;
    ssize_t error;

    dattr = container_of(attr, struct device_attribute, attr);
    error = -EIO;
    if (dattr->store)
        error = dattr->store(container_of(kobj, struct device, kobj),
            dattr, buf, count);
    return (error);
}
static const struct sysfs_ops linux_dev_sysfs = {
    .show  = linux_dev_show,
    .store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
    .release = linux_dev_release,
    .sysfs_ops = &linux_dev_sysfs
};
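
/*
 * Dispatch sketch (illustrative): the show/store wrappers above recover
 * the embedding structures purely by pointer arithmetic, so a read of a
 * device attribute travels roughly as follows (attribute hypothetical):
 *
 *	sysctl-backed sysfs handler
 *	  -> linux_dev_show(kobj, attr, buf)
 *	       dev   = container_of(kobj, struct device, kobj);
 *	       dattr = container_of(attr, struct device_attribute, attr);
 *	  -> dattr->show(dev, dattr, buf)
 *
 * No per-attribute lookup table is needed; the offsets encoded by
 * container_of() are the whole dispatch mechanism.
 */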
struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
    struct device *dev;
    va_list args;

    dev = kzalloc(sizeof(*dev), M_WAITOK);
    dev->parent = parent;
    dev->class = class;
    dev->devt = devt;
    dev->driver_data = drvdata;
    dev->release = linux_device_release;
    va_start(args, fmt);
    kobject_set_name_vargs(&dev->kobj, fmt, args);
    va_end(args);
    device_register(dev);

    return (dev);
}
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
    va_list args;
    int error;

    kobject_init(kobj, ktype);
    kobj->ktype = ktype;
    kobj->parent = parent;
    kobj->name = NULL;

    va_start(args, fmt);
    error = kobject_set_name_vargs(kobj, fmt, args);
    va_end(args);
    if (error)
        return (error);
    return kobject_add_complete(kobj, parent);
}
static void
linux_kq_lock(void *arg)
{
    spinlock_t *s = arg;

    spin_lock(s);
}

static void
linux_kq_unlock(void *arg)
{
    spinlock_t *s = arg;

    spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
    spinlock_t *s = arg;

    mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
    spinlock_t *s = arg;

    mtx_assert(&s->m, MA_NOTOWNED);
#endif
}
static void linux_file_kqfilter_poll(struct linux_file *, int);
struct linux_file *
linux_file_alloc(void)
{
    struct linux_file *filp;

    filp = kzalloc(sizeof(*filp), GFP_KERNEL);

    /* set initial refcount */
    filp->f_count = 1;

    /* setup fields needed by kqueue support */
    spin_lock_init(&filp->f_kqlock);
    knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
        linux_kq_lock, linux_kq_unlock,
        linux_kq_lock_owned, linux_kq_lock_unowned);

    return (filp);
}
void
linux_file_free(struct linux_file *filp)
{
    if (filp->_file == NULL) {
        if (filp->f_shmem != NULL)
            vm_object_deallocate(filp->f_shmem);
        kfree(filp);
    } else {
        /*
         * The close method of the character device or file
         * will free the linux_file structure:
         */
        _fdrop(filp->_file, curthread);
    }
}
static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
    struct vm_area_struct *vmap;

    vmap = linux_cdev_handle_find(vm_obj->handle);

    MPASS(vmap != NULL);
    MPASS(vmap->vm_private_data == vm_obj->handle);

    if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
        vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
        vm_page_t page;

        if (((*mres)->flags & PG_FICTITIOUS) != 0) {
            /*
             * If the passed in result page is a fake
             * page, update it with the new physical
             * address.
             */
            page = *mres;
            vm_page_updatefake(page, paddr, vm_obj->memattr);
        } else {
            /*
             * Replace the passed in "mres" page with our
             * own fake page and free up the all of the
             * original pages.
             */
            VM_OBJECT_WUNLOCK(vm_obj);
            page = vm_page_getfake(paddr, vm_obj->memattr);
            VM_OBJECT_WLOCK(vm_obj);

            vm_page_replace_checked(page, vm_obj,
                (*mres)->pindex, *mres);

            vm_page_lock(*mres);
            vm_page_free(*mres);
            vm_page_unlock(*mres);
            *mres = page;
        }
        page->valid = VM_PAGE_BITS_ALL;
        return (VM_PAGER_OK);
    }
    return (VM_PAGER_FAIL);
}
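
/*
 * Sketch of the fake-page dance above (illustrative): OBJT_DEVICE
 * objects are backed by raw physical addresses rather than managed
 * pages, so the pager hands back a "fictitious" page that merely
 * carries the computed paddr.  On the first fault the ordinary page in
 * *mres is swapped for a fake one; on later faults the existing fake
 * page is only retargeted with vm_page_updatefake().  Note how the
 * object lock is dropped around vm_page_getfake(), which may sleep.
 */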
static int
linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
    struct vm_area_struct *vmap;
    struct vm_fault vmf;
    int err;

    linux_set_current(curthread);

    /* get VM area structure */
    vmap = linux_cdev_handle_find(vm_obj->handle);
    MPASS(vmap != NULL);
    MPASS(vmap->vm_private_data == vm_obj->handle);

    VM_OBJECT_WUNLOCK(vm_obj);

    down_write(&vmap->vm_mm->mmap_sem);
    if (unlikely(vmap->vm_ops == NULL)) {
        err = VM_FAULT_SIGBUS;
    } else {
        /* fill out VM fault structure */
        vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT);
        vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
        vmf.pgoff = 0;
        vmf.page = NULL;

        vmap->vm_pfn_count = 0;
        vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
        vmap->vm_obj = vm_obj;

        err = vmap->vm_ops->fault(vmap, &vmf);

        while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
            kern_yield(PRI_USER);
            err = vmap->vm_ops->fault(vmap, &vmf);
        }
    }

    /* translate return code */
    switch (err) {
    case VM_FAULT_OOM:
        err = VM_PAGER_AGAIN;
        break;
    case VM_FAULT_SIGBUS:
        err = VM_PAGER_BAD;
        break;
    case VM_FAULT_NOPAGE:
        /*
         * By contract the fault handler will return having
         * busied all the pages itself. If pidx is already
         * found in the object, it will simply xbusy the first
         * page and return with vm_pfn_count set to 1.
         */
        *first = vmap->vm_pfn_first;
        *last = *first + vmap->vm_pfn_count - 1;
        err = VM_PAGER_OK;
        break;
    default:
        err = VM_PAGER_ERROR;
        break;
    }
    up_write(&vmap->vm_mm->mmap_sem);
    VM_OBJECT_WLOCK(vm_obj);
    return (err);
}
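
/*
 * Contract notes for the populate path (illustrative summary): the
 * Linux fault handler is expected to install pages itself through
 * vm_insert_pfn()-style helpers, which bump vm_pfn_count via
 * vm_pfn_pcount.  A handler may legitimately return VM_FAULT_NOPAGE
 * before any page has been wired in, for instance while waiting on an
 * internal lock, hence the kern_yield() retry loop above that spins
 * until at least one page shows up or a real error is returned.
 */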
static struct rwlock linux_vma_lock;
static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
    TAILQ_HEAD_INITIALIZER(linux_vma_head);
static void
linux_cdev_handle_free(struct vm_area_struct *vmap)
{
    /* Drop reference on vm_file */
    if (vmap->vm_file != NULL)
        fput(vmap->vm_file);

    /* Drop reference on mm_struct */
    mmput(vmap->vm_mm);

    kfree(vmap);
}

static void
linux_cdev_handle_remove(struct vm_area_struct *vmap)
{
    rw_wlock(&linux_vma_lock);
    TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
    rw_wunlock(&linux_vma_lock);
}
static struct vm_area_struct *
linux_cdev_handle_find(void *handle)
{
    struct vm_area_struct *vmap;

    rw_rlock(&linux_vma_lock);
    TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
        if (vmap->vm_private_data == handle)
            break;
    }
    rw_runlock(&linux_vma_lock);
    return (vmap);
}
static int
linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

    MPASS(linux_cdev_handle_find(handle) != NULL);
    *color = 0;
    return (0);
}
static void
linux_cdev_pager_dtor(void *handle)
{
    const struct vm_operations_struct *vm_ops;
    struct vm_area_struct *vmap;

    vmap = linux_cdev_handle_find(handle);
    MPASS(vmap != NULL);

    /*
     * Remove handle before calling close operation to prevent
     * other threads from reusing the handle pointer.
     */
    linux_cdev_handle_remove(vmap);

    down_write(&vmap->vm_mm->mmap_sem);
    vm_ops = vmap->vm_ops;
    if (likely(vm_ops != NULL))
        vm_ops->close(vmap);
    up_write(&vmap->vm_mm->mmap_sem);

    linux_cdev_handle_free(vmap);
}
static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
  [0] = {
    .cdev_pg_populate = linux_cdev_pager_populate,
    .cdev_pg_ctor = linux_cdev_pager_ctor,
    .cdev_pg_dtor = linux_cdev_pager_dtor
  },
  [1] = {
    .cdev_pg_fault = linux_cdev_pager_fault,
    .cdev_pg_ctor = linux_cdev_pager_ctor,
    .cdev_pg_dtor = linux_cdev_pager_dtor
  },
};
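
/*
 * Selection sketch (illustrative): index [0] is used for OBJT_MGTDEVICE
 * objects, where the Linux vm_ops provide a fault() handler and the
 * populate callback above drives it; index [1] is used for OBJT_DEVICE
 * objects without a fault() handler, where linux_cdev_pager_fault()
 * synthesizes fake pages directly from vm_pfn.  See the vm_no_fault
 * test in linux_file_mmap_single() below.
 */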
#define OPW(fp,td,code) ({          \
    struct file *__fpop;            \
    __typeof(code) __retval;        \
                                    \
    __fpop = (td)->td_fpop;         \
    (td)->td_fpop = (fp);           \
    __retval = (code);              \
    (td)->td_fpop = __fpop;         \
    __retval;                       \
})
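
/*
 * Usage sketch for OPW() (illustrative): Linux file operations expect
 * to find their file pointer via the current thread, so the macro
 * temporarily points td_fpop at "fp" for the duration of one call and
 * restores the previous value afterwards, e.g.:
 *
 *	error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, arg));
 *
 * The statement-expression evaluates to whatever "code" returns, so it
 * nests naturally inside error handling.
 */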
static int
linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file)
{
    struct linux_cdev *ldev;
    struct linux_file *filp;
    int error;

    ldev = dev->si_drv1;

    filp = linux_file_alloc();
    filp->f_dentry = &filp->f_dentry_store;
    filp->f_op = ldev->ops;
    filp->f_mode = file->f_flag;
    filp->f_flags = file->f_flag;
    filp->f_vnode = file->f_vnode;
    filp->_file = file;

    linux_set_current(td);

    if (filp->f_op->open) {
        error = -filp->f_op->open(file->f_vnode, filp);
        if (error != 0) {
            kfree(filp);
            return (error);
        }
    }

    /* hold on to the vnode - used for fstat() */
    vhold(filp->f_vnode);

    /* release the file from devfs */
    finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
    return (ENXIO);
}
#define LINUX_IOCTL_MIN_PTR 0x10000UL
#define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
static inline int
linux_remap_address(void **uaddr, size_t len)
{
    uintptr_t uaddr_val = (uintptr_t)(*uaddr);

    if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
        uaddr_val < LINUX_IOCTL_MAX_PTR)) {
        struct task_struct *pts = current;
        if (pts == NULL) {
            *uaddr = NULL;
            return (1);
        }

        /* compute data offset */
        uaddr_val -= LINUX_IOCTL_MIN_PTR;

        /* check that length is within bounds */
        if ((len > IOCPARM_MAX) ||
            (uaddr_val + len) > pts->bsd_ioctl_len) {
            *uaddr = NULL;
            return (1);
        }

        /* re-add kernel buffer address */
        uaddr_val += (uintptr_t)pts->bsd_ioctl_data;

        /* update address location */
        *uaddr = (void *)uaddr_val;
        return (1);
    }
    return (0);
}
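
/*
 * Worked example (illustrative; the addresses are hypothetical): for an
 * ioctl with IOCPARM_LEN(cmd) == 16, linux_file_ioctl_sub() stores the
 * kernel buffer in bsd_ioctl_data and hands the Linux handler the bogus
 * pointer 0x10000.  If the handler later does copy_from_user() with
 * uaddr == 0x10004 and len == 8, the code above maps that back to
 * bsd_ioctl_data + 4 and memcpy()s directly, since the data already
 * lives in kernel space.  Pointers outside the window fall through to
 * the real copyin()/copyout().
 */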
int
linux_copyin(const void *uaddr, void *kaddr, size_t len)
{
    if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
        if (uaddr == NULL)
            return (-EFAULT);
        memcpy(kaddr, uaddr, len);
        return (0);
    }
    return (-copyin(uaddr, kaddr, len));
}

int
linux_copyout(const void *kaddr, void *uaddr, size_t len)
{
    if (linux_remap_address(&uaddr, len)) {
        if (uaddr == NULL)
            return (-EFAULT);
        memcpy(uaddr, kaddr, len);
        return (0);
    }
    return (-copyout(kaddr, uaddr, len));
}
size_t
linux_clear_user(void *_uaddr, size_t _len)
{
    uint8_t *uaddr = _uaddr;
    size_t len = _len;

    /* make sure uaddr is aligned before going into the fast loop */
    while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
        if (subyte(uaddr, 0))
            return (_len);
        uaddr++;
        len--;
    }

    /* zero 8 bytes at a time */
    while (len > 7) {
#ifdef __LP64__
        if (suword64(uaddr, 0))
            return (_len);
#else
        if (suword32(uaddr, 0))
            return (_len);
        if (suword32(uaddr + 4, 0))
            return (_len);
#endif
        uaddr += 8;
        len -= 8;
    }

    /* zero fill end, if any */
    while (len > 0) {
        if (subyte(uaddr, 0))
            return (_len);
        uaddr++;
        len--;
    }
    return (0);
}
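
/*
 * Shape of the loops above (illustrative): for a buffer starting at an
 * unaligned address, e.g. uaddr = 0x1003 and len = 21, the first loop
 * zeroes 5 bytes up to the 8-byte boundary at 0x1008, the middle loop
 * clears the next 16 bytes with word-sized stores, and the tail loop
 * finishes the remaining 0 bytes.  Any faulting store aborts with the
 * original length, matching Linux clear_user() semantics.
 */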
int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
    uintptr_t saddr;
    uintptr_t eaddr;

    /* get start and end address */
    saddr = (uintptr_t)uaddr;
    eaddr = (uintptr_t)uaddr + len;

    /* verify addresses are valid for userspace */
    return ((saddr == eaddr) ||
        (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}
static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    u_long cmd, caddr_t data, struct thread *td)
{
    unsigned size;
    int error;

    size = IOCPARM_LEN(cmd);
    /* refer to logic in sys_ioctl() */
    if (size > 0) {
        /*
         * Setup hint for linux_copyin() and linux_copyout().
         *
         * Background: Linux code expects a user-space address
         * while FreeBSD supplies a kernel-space address.
         */
        current->bsd_ioctl_data = data;
        current->bsd_ioctl_len = size;
        data = (void *)LINUX_IOCTL_MIN_PTR;
    } else {
        /* fetch user-space pointer */
        data = *(void **)data;
    }
#if defined(__amd64__)
    if (td->td_proc->p_elf_machine == EM_386) {
        /* try the compat IOCTL handler first */
        if (filp->f_op->compat_ioctl != NULL)
            error = -OPW(fp, td, filp->f_op->compat_ioctl(filp, cmd, (u_long)data));
        else
            error = ENOTTY;

        /* fallback to the regular IOCTL handler, if any */
        if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL)
            error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data));
    } else
#endif
    {
        if (filp->f_op->unlocked_ioctl != NULL)
            error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data));
        else
            error = ENOTTY;
    }
    if (size > 0) {
        current->bsd_ioctl_data = NULL;
        current->bsd_ioctl_len = 0;
    }

    if (error == EWOULDBLOCK) {
        /* update kqfilter status, if any */
        linux_file_kqfilter_poll(filp,
            LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
    } else if (error == ERESTARTSYS)
        error = ERESTART;
    return (error);
}
#define LINUX_POLL_TABLE_NORMAL ((poll_table *)1)
/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
static uint8_t
linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
{
    int c, old;

    c = v->counter;

    while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
        c = old;

    return (c);
}
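
/*
 * Transition sketch (illustrative): "pstate" is a four-entry table
 * indexed by the current state, so each caller describes an entire
 * transition function at once.  The wakeup callback below, for
 * instance, maps QUEUED -> READY and leaves every other state alone;
 * the cmpxchg loop retries until the table entry for the observed
 * state has been applied atomically, and the returned old state tells
 * the caller which edge actually fired.
 */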
static int
linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
{
    static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
        [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,  /* NOP */
        [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
        [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
        [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
    };
    struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);

    switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
    case LINUX_FWQ_STATE_QUEUED:
        linux_poll_wakeup(filp);
        return (1);
    default:
        return (0);
    }
}
void
linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
{
    static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
        [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
        [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
        [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
        [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
    };

    /* check if we are called inside the select system call */
    if (p == LINUX_POLL_TABLE_NORMAL)
        selrecord(curthread, &filp->f_selinfo);

    switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
    case LINUX_FWQ_STATE_INIT:
        /* NOTE: file handles can only belong to one wait-queue */
        filp->f_wait_queue.wqh = wqh;
        filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
        add_wait_queue(wqh, &filp->f_wait_queue.wq);
        atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
        break;
    default:
        break;
    }
}
static void
linux_poll_wait_dequeue(struct linux_file *filp)
{
    static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
        [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,  /* NOP */
        [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
        [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
        [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
    };

    seldrain(&filp->f_selinfo);

    switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
    case LINUX_FWQ_STATE_NOT_READY:
    case LINUX_FWQ_STATE_QUEUED:
    case LINUX_FWQ_STATE_READY:
        remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
        break;
    default:
        break;
    }
}
void
linux_poll_wakeup(struct linux_file *filp)
{
    /* this function should be NULL-safe */
    if (filp == NULL)
        return;

    selwakeup(&filp->f_selinfo);

    spin_lock(&filp->f_kqlock);
    filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
        LINUX_KQ_FLAG_NEED_WRITE;

    /* make sure the "knote" gets woken up */
    KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
    spin_unlock(&filp->f_kqlock);
}
static void
linux_file_kqfilter_detach(struct knote *kn)
{
    struct linux_file *filp = kn->kn_hook;

    spin_lock(&filp->f_kqlock);
    knlist_remove(&filp->f_selinfo.si_note, kn, 1);
    spin_unlock(&filp->f_kqlock);
}
static int
linux_file_kqfilter_read_event(struct knote *kn, long hint)
{
    struct linux_file *filp = kn->kn_hook;

    mtx_assert(&filp->f_kqlock.m, MA_OWNED);

    return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
    struct linux_file *filp = kn->kn_hook;

    mtx_assert(&filp->f_kqlock.m, MA_OWNED);

    return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}
static struct filterops linux_dev_kqfiltops_read = {
    .f_isfd = 1,
    .f_detach = linux_file_kqfilter_detach,
    .f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
    .f_isfd = 1,
    .f_detach = linux_file_kqfilter_detach,
    .f_event = linux_file_kqfilter_write_event,
};
static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
    int temp;

    if (filp->f_kqflags & kqflags) {
        struct thread *td = curthread;

        /* get the latest polling state */
        temp = OPW(filp->_file, td, filp->f_op->poll(filp, NULL));

        spin_lock(&filp->f_kqlock);
        /* clear kqflags */
        filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
            LINUX_KQ_FLAG_NEED_WRITE);
        /* update kqflags */
        if (temp & (POLLIN | POLLOUT)) {
            if (temp & POLLIN)
                filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
            if (temp & POLLOUT)
                filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

            /* make sure the "knote" gets woken up */
            KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
        }
        spin_unlock(&filp->f_kqlock);
    }
}
static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
    struct linux_file *filp;
    struct thread *td;
    int error;

    td = curthread;
    filp = (struct linux_file *)file->f_data;
    filp->f_flags = file->f_flag;
    if (filp->f_op->poll == NULL)
        return (EINVAL);

    spin_lock(&filp->f_kqlock);
    switch (kn->kn_filter) {
    case EVFILT_READ:
        filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
        kn->kn_fop = &linux_dev_kqfiltops_read;
        kn->kn_hook = filp;
        knlist_add(&filp->f_selinfo.si_note, kn, 1);
        error = 0;
        break;
    case EVFILT_WRITE:
        filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
        kn->kn_fop = &linux_dev_kqfiltops_write;
        kn->kn_hook = filp;
        knlist_add(&filp->f_selinfo.si_note, kn, 1);
        error = 0;
        break;
    default:
        error = EINVAL;
        break;
    }
    spin_unlock(&filp->f_kqlock);

    if (error == 0) {
        linux_set_current(td);

        /* update kqfilter status, if any */
        linux_file_kqfilter_poll(filp,
            LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
    }
    return (error);
}
static int
linux_file_mmap_single(struct file *fp, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot,
    struct thread *td)
{
    struct vm_area_struct *vmap;
    struct mm_struct *mm;
    struct linux_file *filp;
    vm_memattr_t attr;
    int error;

    filp = (struct linux_file *)fp->f_data;
    filp->f_flags = fp->f_flag;

    if (filp->f_op->mmap == NULL)
        return (EOPNOTSUPP);

    linux_set_current(td);

    /*
     * The same VM object might be shared by multiple processes
     * and the mm_struct is usually freed when a process exits.
     *
     * The atomic reference below makes sure the mm_struct is
     * available as long as the vmap is in the linux_vma_head.
     */
    mm = current->mm;
    if (atomic_inc_not_zero(&mm->mm_users) == 0)
        return (EINVAL);

    vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
    vmap->vm_start = 0;
    vmap->vm_end = size;
    vmap->vm_pgoff = *offset / PAGE_SIZE;
    vmap->vm_pfn = 0;
    vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
    vmap->vm_ops = NULL;
    vmap->vm_file = get_file(filp);
    vmap->vm_mm = mm;

    if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
        error = EINTR;
    } else {
        error = -OPW(fp, td, filp->f_op->mmap(filp, vmap));
        if (error == ERESTARTSYS)
            error = ERESTART;
        up_write(&vmap->vm_mm->mmap_sem);
    }

    if (error != 0) {
        linux_cdev_handle_free(vmap);
        return (error);
    }

    attr = pgprot2cachemode(vmap->vm_page_prot);

    if (vmap->vm_ops != NULL) {
        struct vm_area_struct *ptr;
        void *vm_private_data;
        bool vm_no_fault;

        if (vmap->vm_ops->open == NULL ||
            vmap->vm_ops->close == NULL ||
            vmap->vm_private_data == NULL) {
            /* free allocated VM area struct */
            linux_cdev_handle_free(vmap);
            return (EINVAL);
        }

        vm_private_data = vmap->vm_private_data;

        rw_wlock(&linux_vma_lock);
        TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
            if (ptr->vm_private_data == vm_private_data)
                break;
        }
        /* check if there is an existing VM area struct */
        if (ptr != NULL) {
            /* check if the VM area structure is invalid */
            if (ptr->vm_ops == NULL ||
                ptr->vm_ops->open == NULL ||
                ptr->vm_ops->close == NULL) {
                error = ESTALE;
                vm_no_fault = true;
            } else {
                error = EEXIST;
                vm_no_fault = (ptr->vm_ops->fault == NULL);
            }
        } else {
            /* insert VM area structure into list */
            TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
            error = 0;
            vm_no_fault = (vmap->vm_ops->fault == NULL);
        }
        rw_wunlock(&linux_vma_lock);

        if (error != 0) {
            /* free allocated VM area struct */
            linux_cdev_handle_free(vmap);
            /* check for stale VM area struct */
            if (error != EEXIST)
                return (error);
        }

        /* check if there is no fault handler */
        if (vm_no_fault) {
            *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
                &linux_cdev_pager_ops[1], size, nprot, *offset,
                td->td_ucred);
        } else {
            *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
                &linux_cdev_pager_ops[0], size, nprot, *offset,
                td->td_ucred);
        }

        /* check if allocating the VM object failed */
        if (*object == NULL) {
            if (error == 0) {
                /* remove VM area struct from list */
                linux_cdev_handle_remove(vmap);
                /* free allocated VM area struct */
                linux_cdev_handle_free(vmap);
            }
            return (EINVAL);
        }
    } else {
        struct sglist *sg;

        sg = sglist_alloc(1, M_WAITOK);
        sglist_append_phys(sg,
            (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);

        *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
            nprot, 0, td->td_ucred);

        linux_cdev_handle_free(vmap);

        if (*object == NULL) {
            sglist_free(sg);
            return (EINVAL);
        }
    }

    if (attr != VM_MEMATTR_DEFAULT) {
        VM_OBJECT_WLOCK(*object);
        vm_object_set_memattr(*object, attr);
        VM_OBJECT_WUNLOCK(*object);
    }
    *offset = 0;
    return (0);
}
struct cdevsw linuxcdevsw = {
    .d_version = D_VERSION,
    .d_fdopen = linux_dev_fdopen,
    .d_name = "lkpidev",
};
static int
linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct linux_file *filp;
    ssize_t bytes;
    int error;

    error = 0;
    filp = (struct linux_file *)file->f_data;
    filp->f_flags = file->f_flag;
    /* XXX no support for I/O vectors currently */
    if (uio->uio_iovcnt != 1)
        return (EOPNOTSUPP);
    if (uio->uio_resid > DEVFS_IOSIZE_MAX)
        return (EINVAL);
    linux_set_current(td);
    if (filp->f_op->read) {
        bytes = OPW(file, td, filp->f_op->read(filp, uio->uio_iov->iov_base,
            uio->uio_iov->iov_len, &uio->uio_offset));
        if (bytes >= 0) {
            uio->uio_iov->iov_base =
                ((uint8_t *)uio->uio_iov->iov_base) + bytes;
            uio->uio_iov->iov_len -= bytes;
            uio->uio_resid -= bytes;
        } else {
            error = -bytes;
            if (error == ERESTARTSYS)
                error = ERESTART;
        }
    } else
        error = ENXIO;

    /* update kqfilter status, if any */
    linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);

    return (error);
}
static int
linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
    struct linux_file *filp;
    ssize_t bytes;
    int error;

    error = 0;
    filp = (struct linux_file *)file->f_data;
    filp->f_flags = file->f_flag;
    /* XXX no support for I/O vectors currently */
    if (uio->uio_iovcnt != 1)
        return (EOPNOTSUPP);
    if (uio->uio_resid > DEVFS_IOSIZE_MAX)
        return (EINVAL);
    linux_set_current(td);
    if (filp->f_op->write) {
        bytes = OPW(file, td, filp->f_op->write(filp, uio->uio_iov->iov_base,
            uio->uio_iov->iov_len, &uio->uio_offset));
        if (bytes >= 0) {
            uio->uio_iov->iov_base =
                ((uint8_t *)uio->uio_iov->iov_base) + bytes;
            uio->uio_iov->iov_len -= bytes;
            uio->uio_resid -= bytes;
        } else {
            error = -bytes;
            if (error == ERESTARTSYS)
                error = ERESTART;
        }
    } else
        error = ENXIO;

    /* update kqfilter status, if any */
    linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);

    return (error);
}
static int
linux_file_poll(struct file *file, int events, struct ucred *active_cred,
    struct thread *td)
{
    struct linux_file *filp;
    int revents;

    filp = (struct linux_file *)file->f_data;
    filp->f_flags = file->f_flag;
    linux_set_current(td);
    if (filp->f_op->poll != NULL)
        revents = OPW(file, td, filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events;
    else
        revents = 0;

    return (revents);
}
static int
linux_file_close(struct file *file, struct thread *td)
{
    struct linux_file *filp;
    int error;

    filp = (struct linux_file *)file->f_data;

    KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp)));

    filp->f_flags = file->f_flag;
    linux_set_current(td);
    linux_poll_wait_dequeue(filp);
    error = -OPW(file, td, filp->f_op->release(filp->f_vnode, filp));
    funsetown(&filp->f_sigio);
    if (filp->f_vnode != NULL)
        vdrop(filp->f_vnode);
    kfree(filp);

    return (error);
}
static int
linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
    struct thread *td)
{
    struct linux_file *filp;
    int error;

    filp = (struct linux_file *)fp->f_data;
    filp->f_flags = fp->f_flag;
    error = 0;

    linux_set_current(td);
    switch (cmd) {
    case FIONBIO:
        break;
    case FIOASYNC:
        if (filp->f_op->fasync == NULL)
            break;
        error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC));
        break;
    case FIOSETOWN:
        error = fsetown(*(int *)data, &filp->f_sigio);
        if (error == 0) {
            if (filp->f_op->fasync == NULL)
                break;
            error = -OPW(fp, td, filp->f_op->fasync(0, filp,
                fp->f_flag & FASYNC));
        }
        break;
    case FIOGETOWN:
        *(int *)data = fgetown(&filp->f_sigio);
        break;
    default:
        error = linux_file_ioctl_sub(fp, filp, cmd, data, td);
        break;
    }
    return (error);
}
static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
    vm_ooffset_t *foff, vm_object_t *objp)
{
    /*
     * Character devices do not provide private mappings
     * of any kind:
     */
    if ((*maxprotp & VM_PROT_WRITE) == 0 &&
        (prot & VM_PROT_WRITE) != 0)
        return (EACCES);
    if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
        return (EINVAL);

    return (linux_file_mmap_single(fp, foff, objsize, objp, (int)prot, td));
}
static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
    struct linux_file *filp;
    struct mount *mp;
    struct vnode *vp;
    vm_object_t object;
    vm_prot_t maxprot;
    int error;

    filp = (struct linux_file *)fp->f_data;

    vp = filp->f_vnode;
    if (vp == NULL)
        return (EOPNOTSUPP);

    /*
     * Ensure that file and memory protections are
     * compatible.
     */
    mp = vp->v_mount;
    if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
        maxprot = VM_PROT_NONE;
        if ((prot & VM_PROT_EXECUTE) != 0)
            return (EACCES);
    } else
        maxprot = VM_PROT_EXECUTE;
    if ((fp->f_flag & FREAD) != 0)
        maxprot |= VM_PROT_READ;
    else if ((prot & VM_PROT_READ) != 0)
        return (EACCES);

    /*
     * If we are sharing potential changes via MAP_SHARED and we
     * are trying to get write permission although we opened it
     * without asking for it, bail out.
     *
     * Note that most character devices always share mappings.
     *
     * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
     * requests rather than doing it here.
     */
    if ((flags & MAP_SHARED) != 0) {
        if ((fp->f_flag & FWRITE) != 0)
            maxprot |= VM_PROT_WRITE;
        else if ((prot & VM_PROT_WRITE) != 0)
            return (EACCES);
    }
    maxprot &= cap_maxprot;

    error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff,
        &object);
    if (error != 0)
        return (error);

    error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
        foff, FALSE, td);
    if (error != 0)
        vm_object_deallocate(object);
    return (error);
}
static int
linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
    struct linux_file *filp;
    struct vnode *vp;
    int error;

    filp = (struct linux_file *)fp->f_data;
    if (filp->f_vnode == NULL)
        return (EOPNOTSUPP);

    vp = filp->f_vnode;

    vn_lock(vp, LK_SHARED | LK_RETRY);
    error = vn_stat(vp, sb, td->td_ucred, NOCRED, td);
    VOP_UNLOCK(vp, 0);

    return (error);
}
static int
linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp)
{

    return (0);
}
unsigned int
linux_iminor(struct inode *inode)
{
    struct linux_cdev *ldev;

    if (inode == NULL || inode->v_rdev == NULL ||
        inode->v_rdev->si_devsw != &linuxcdevsw)
        return (-1U);
    ldev = inode->v_rdev->si_drv1;
    if (ldev == NULL)
        return (-1U);

    return (minor(ldev->dev));
}
struct fileops linuxfileops = {
    .fo_read = linux_file_read,
    .fo_write = linux_file_write,
    .fo_truncate = invfo_truncate,
    .fo_kqfilter = linux_file_kqfilter,
    .fo_stat = linux_file_stat,
    .fo_fill_kinfo = linux_file_fill_kinfo,
    .fo_poll = linux_file_poll,
    .fo_close = linux_file_close,
    .fo_ioctl = linux_file_ioctl,
    .fo_mmap = linux_file_mmap,
    .fo_chmod = invfo_chmod,
    .fo_chown = invfo_chown,
    .fo_sendfile = invfo_sendfile,
    .fo_flags = DFLAG_PASSABLE,
};
/*
 * Hash of vmmap addresses.  This is infrequently accessed and does not
 * need to be particularly large.  This is done because we must store the
 * caller's idea of the map size to properly unmap.
 */
struct vmmap {
    LIST_ENTRY(vmmap)   vm_next;
    void                *vm_addr;
    unsigned long       vm_size;
};

struct vmmaphd {
    struct vmmap *lh_first;
};
#define VMMAP_HASH_SIZE 64
#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
#define VM_HASH(addr)   ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
static struct mtx vmmaplock;
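
/*
 * Bucket-selection example (illustrative; the address is hypothetical):
 * with 4K pages an address such as 0xfffff80003c21000 hashes to
 * (addr >> PAGE_SHIFT) & 63, so mappings on consecutive pages spread
 * across consecutive buckets.  The table only has to answer "what size
 * was this address mapped with", so 64 buckets are plenty for the
 * handful of ioremap()/vmap() users.
 */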
static void
vmmap_add(void *addr, unsigned long size)
{
    struct vmmap *vmmap;

    vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
    mtx_lock(&vmmaplock);
    vmmap->vm_size = size;
    vmmap->vm_addr = addr;
    LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
    mtx_unlock(&vmmaplock);
}
static struct vmmap *
vmmap_remove(void *addr)
{
    struct vmmap *vmmap;

    mtx_lock(&vmmaplock);
    LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
        if (vmmap->vm_addr == addr)
            break;
    if (vmmap)
        LIST_REMOVE(vmmap, vm_next);
    mtx_unlock(&vmmaplock);

    return (vmmap);
}
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *
_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
{
    void *addr;

    addr = pmap_mapdev_attr(phys_addr, size, attr);
    if (addr == NULL)
        return (NULL);
    vmmap_add(addr, size);

    return (addr);
}
#endif
void
iounmap(void *addr)
{
    struct vmmap *vmmap;

    vmmap = vmmap_remove(addr);
    if (vmmap == NULL)
        return;
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
    pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
#endif
    kfree(vmmap);
}
void *
vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
{
    vm_offset_t off;
    size_t size;

    size = count * PAGE_SIZE;
    off = kva_alloc(size);
    if (off == 0)
        return (NULL);
    vmmap_add((void *)off, size);
    pmap_qenter(off, pages, count);

    return ((void *)off);
}
void
vunmap(void *addr)
{
    struct vmmap *vmmap;

    vmmap = vmmap_remove(addr);
    if (vmmap == NULL)
        return;
    pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
    kva_free((vm_offset_t)addr, vmmap->vm_size);
    kfree(vmmap);
}
char *
kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
    unsigned int len;
    char *p;
    va_list aq;

    va_copy(aq, ap);
    len = vsnprintf(NULL, 0, fmt, aq);
    va_end(aq);

    p = kmalloc(len + 1, gfp);
    if (p != NULL)
        vsnprintf(p, len + 1, fmt, ap);

    return (p);
}
char *
kasprintf(gfp_t gfp, const char *fmt, ...)
{
    va_list ap;
    char *p;

    va_start(ap, fmt);
    p = kvasprintf(gfp, fmt, ap);
    va_end(ap);

    return (p);
}
static void
linux_timer_callback_wrapper(void *context)
{
    struct timer_list *timer;

    linux_set_current(curthread);

    timer = context;
    timer->function(timer->data);
}
void
mod_timer(struct timer_list *timer, int expires)
{

    timer->expires = expires;
    callout_reset(&timer->timer_callout,
        linux_timer_jiffies_until(expires),
        &linux_timer_callback_wrapper, timer);
}

void
add_timer(struct timer_list *timer)
{

    callout_reset(&timer->timer_callout,
        linux_timer_jiffies_until(timer->expires),
        &linux_timer_callback_wrapper, timer);
}

void
add_timer_on(struct timer_list *timer, int cpu)
{

    callout_reset_on(&timer->timer_callout,
        linux_timer_jiffies_until(timer->expires),
        &linux_timer_callback_wrapper, timer, cpu);
}
static void
linux_timer_init(void *arg)
{

    /*
     * Compute an internal HZ value which can divide 2**32 to
     * avoid timer rounding problems when the tick value wraps
     * around 2**32:
     */
    linux_timer_hz_mask = 1;
    while (linux_timer_hz_mask < (unsigned long)hz)
        linux_timer_hz_mask *= 2;
    linux_timer_hz_mask--;
}
SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
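
/*
 * Worked example (illustrative): with hz = 1000 the loop above leaves
 * linux_timer_hz_mask = 1023, the next power of two minus one.  Since
 * 1024 divides 2**32 evenly, rounding jiffies with this mask stays
 * consistent when the 32-bit tick counter wraps, which a mask derived
 * directly from 1000 would not.
 */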
static void
linux_complete_common(struct completion *c, int all)
{
    int wakeup_swapper;

    sleepq_lock(c);
    c->done++;
    if (all)
        wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
    else
        wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
    sleepq_release(c);
    if (wakeup_swapper)
        kick_proc0();
}
/*
 * Indefinite wait for done != 0 with or without signals.
 */
int
linux_wait_for_common(struct completion *c, int flags)
{
    int error;

    if (SCHEDULER_STOPPED())
        return (0);

    DROP_GIANT();

    if (flags != 0)
        flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
    else
        flags = SLEEPQ_SLEEP;
    error = 0;
    for (;;) {
        sleepq_lock(c);
        if (c->done)
            break;
        sleepq_add(c, NULL, "completion", flags, 0);
        if (flags & SLEEPQ_INTERRUPTIBLE) {
            if (sleepq_wait_sig(c, 0) != 0) {
                error = -ERESTARTSYS;
                goto intr;
            }
        } else
            sleepq_wait(c, 0);
    }
    c->done--;
    sleepq_release(c);

intr:
    PICKUP_GIANT();

    return (error);
}
/*
 * Time limited wait for done != 0 with or without signals.
 */
int
linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
{
    int end = jiffies + timeout;
    int error;
    int ret;

    if (SCHEDULER_STOPPED())
        return (0);

    DROP_GIANT();

    if (flags != 0)
        flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
    else
        flags = SLEEPQ_SLEEP;

    error = 0;
    ret = 0;
    for (;;) {
        sleepq_lock(c);
        if (c->done)
            break;
        sleepq_add(c, NULL, "completion", flags, 0);
        sleepq_set_timeout(c, linux_timer_jiffies_until(end));
        if (flags & SLEEPQ_INTERRUPTIBLE)
            ret = sleepq_timedwait_sig(c, 0);
        else
            ret = sleepq_timedwait(c, 0);
        if (ret != 0) {
            /* check for timeout or signal */
            if (ret == EWOULDBLOCK)
                error = 0;
            else
                error = -ERESTARTSYS;
            goto intr;
        }
    }
    c->done--;
    sleepq_release(c);

intr:
    PICKUP_GIANT();

    /* return how many jiffies are left */
    return (ret != 0 ? error : linux_timer_jiffies_until(end));
}
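
/*
 * Return-convention sketch (illustrative): callers of
 * wait_for_completion_timeout() expect 0 on timeout, a negative errno
 * when interrupted, and otherwise the number of jiffies remaining, so
 * e.g. a 100-jiffy wait satisfied after roughly 40 jiffies returns
 * about 60 via linux_timer_jiffies_until(end).
 */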
int
linux_try_wait_for_completion(struct completion *c)
{
    int isdone;

    isdone = 1;
    sleepq_lock(c);
    if (c->done)
        c->done--;
    else
        isdone = 0;
    sleepq_release(c);
    return (isdone);
}

int
linux_completion_done(struct completion *c)
{
    int isdone;

    isdone = 1;
    sleepq_lock(c);
    if (c->done == 0)
        isdone = 0;
    sleepq_release(c);
    return (isdone);
}
static void
linux_cdev_release(struct kobject *kobj)
{
    struct linux_cdev *cdev;
    struct kobject *parent;

    cdev = container_of(kobj, struct linux_cdev, kobj);
    parent = kobj->parent;
    if (cdev->cdev)
        destroy_dev(cdev->cdev);
    kfree(cdev);
    kobject_put(parent);
}

static void
linux_cdev_static_release(struct kobject *kobj)
{
    struct linux_cdev *cdev;
    struct kobject *parent;

    cdev = container_of(kobj, struct linux_cdev, kobj);
    parent = kobj->parent;
    if (cdev->cdev)
        destroy_dev(cdev->cdev);
    kobject_put(parent);
}

const struct kobj_type linux_cdev_ktype = {
    .release = linux_cdev_release,
};

const struct kobj_type linux_cdev_static_ktype = {
    .release = linux_cdev_static_release,
};
static void
linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
{
    struct notifier_block *nb;

    nb = arg;
    if (linkstate == LINK_STATE_UP)
        nb->notifier_call(nb, NETDEV_UP, ifp);
    else
        nb->notifier_call(nb, NETDEV_DOWN, ifp);
}

static void
linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
{
    struct notifier_block *nb;

    nb = arg;
    nb->notifier_call(nb, NETDEV_REGISTER, ifp);
}

static void
linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
{
    struct notifier_block *nb;

    nb = arg;
    nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
}

static void
linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
{
    struct notifier_block *nb;

    nb = arg;
    nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
}

static void
linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
{
    struct notifier_block *nb;

    nb = arg;
    nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
}
int
register_netdevice_notifier(struct notifier_block *nb)
{

    nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
        ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
    nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
        ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
    nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
        ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
    nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
        iflladdr_event, linux_handle_iflladdr_event, nb, 0);

    return (0);
}

int
register_inetaddr_notifier(struct notifier_block *nb)
{

    nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
        ifaddr_event, linux_handle_ifaddr_event, nb, 0);
    return (0);
}
int
unregister_netdevice_notifier(struct notifier_block *nb)
{

    EVENTHANDLER_DEREGISTER(ifnet_link_event,
        nb->tags[NETDEV_UP]);
    EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
        nb->tags[NETDEV_REGISTER]);
    EVENTHANDLER_DEREGISTER(ifnet_departure_event,
        nb->tags[NETDEV_UNREGISTER]);
    EVENTHANDLER_DEREGISTER(iflladdr_event,
        nb->tags[NETDEV_CHANGEADDR]);

    return (0);
}

int
unregister_inetaddr_notifier(struct notifier_block *nb)
{

    EVENTHANDLER_DEREGISTER(ifaddr_event,
        nb->tags[NETDEV_CHANGEIFADDR]);

    return (0);
}
struct list_sort_thunk {
    int (*cmp)(void *, struct list_head *, struct list_head *);
    void *priv;
};

static inline int
linux_le_cmp(void *priv, const void *d1, const void *d2)
{
    struct list_head *le1, *le2;
    struct list_sort_thunk *thunk;

    thunk = priv;
    le1 = *(__DECONST(struct list_head **, d1));
    le2 = *(__DECONST(struct list_head **, d2));
    return ((thunk->cmp)(thunk->priv, le1, le2));
}
void
list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
    struct list_head *a, struct list_head *b))
{
    struct list_sort_thunk thunk;
    struct list_head **ar, *le;
    size_t count, i;

    count = 0;
    list_for_each(le, head)
        count++;
    ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
    i = 0;
    list_for_each(le, head)
        ar[i++] = le;
    thunk.cmp = cmp;
    thunk.priv = priv;
    qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
    INIT_LIST_HEAD(head);
    for (i = 0; i < count; i++)
        list_add_tail(ar[i], head);
    free(ar, M_KMALLOC);
}
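
/*
 * Design note (illustrative): instead of merge-sorting the linked list
 * in place the way Linux does, the list is flattened into a temporary
 * array, sorted with qsort_r() through the linux_le_cmp() thunk, and
 * relinked in order.  The thunk is needed because qsort_r() compares
 * "struct list_head **" array elements while the user callback expects
 * plain list_head pointers plus its private argument.
 */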
static void
linux_irq_handler(void *ent)
{
    struct irq_ent *irqe;

    linux_set_current(curthread);

    irqe = ent;
    irqe->handler(irqe->irq, irqe->arg);
}
#if defined(__i386__) || defined(__amd64__)
int
linux_wbinvd_on_all_cpus(void)
{

    pmap_invalidate_cache();
    return (0);
}
#endif

int
linux_on_each_cpu(void callback(void *), void *data)
{

    smp_rendezvous(smp_no_rendezvous_barrier, callback,
        smp_no_rendezvous_barrier, data);
    return (0);
}
int
linux_in_atomic(void)
{

    return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
}
struct linux_cdev *
linux_find_cdev(const char *name, unsigned major, unsigned minor)
{
    dev_t dev = MKDEV(major, minor);
    struct cdev *cdev;

    dev_lock();
    LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
        struct linux_cdev *ldev = cdev->si_drv1;
        if (ldev->dev == dev &&
            strcmp(kobject_name(&ldev->kobj), name) == 0) {
            break;
        }
    }
    dev_unlock();

    return (cdev != NULL ? cdev->si_drv1 : NULL);
}
int
__register_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops)
{
    struct linux_cdev *cdev;
    int ret = 0;
    int i;

    for (i = baseminor; i < baseminor + count; i++) {
        cdev = cdev_alloc();
        cdev_init(cdev, fops);
        kobject_set_name(&cdev->kobj, name);

        ret = cdev_add(cdev, makedev(major, i), 1);
        if (ret != 0)
            break;
    }
    return (ret);
}
int
__register_chrdev_p(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name,
    const struct file_operations *fops, uid_t uid,
    gid_t gid, int mode)
{
    struct linux_cdev *cdev;
    int ret = 0;
    int i;

    for (i = baseminor; i < baseminor + count; i++) {
        cdev = cdev_alloc();
        cdev_init(cdev, fops);
        kobject_set_name(&cdev->kobj, name);

        ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
        if (ret != 0)
            break;
    }
    return (ret);
}
void
__unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name)
{
    struct linux_cdev *cdevp;
    int i;

    for (i = baseminor; i < baseminor + count; i++) {
        cdevp = linux_find_cdev(name, major, i);
        if (cdevp != NULL)
            cdev_del(cdevp);
    }
}
#if defined(__i386__) || defined(__amd64__)
bool linux_cpu_has_clflush;
#endif
static void
linux_compat_init(void *arg)
{
    struct sysctl_oid *rootoid;
    int i;

#if defined(__i386__) || defined(__amd64__)
    linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
#endif
    rw_init(&linux_vma_lock, "lkpi-vma-lock");

    rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
        OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
    kobject_init(&linux_class_root, &linux_class_ktype);
    kobject_set_name(&linux_class_root, "class");
    linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
        OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
    kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
    kobject_set_name(&linux_root_device.kobj, "device");
    linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
        SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
        "device");
    linux_root_device.bsddev = root_bus;
    linux_class_misc.name = "misc";
    class_register(&linux_class_misc);
    INIT_LIST_HEAD(&pci_drivers);
    INIT_LIST_HEAD(&pci_devices);
    spin_lock_init(&pci_lock);
    mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
    for (i = 0; i < VMMAP_HASH_SIZE; i++)
        LIST_INIT(&vmmaphead[i]);
}
SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
static void
linux_compat_uninit(void *arg)
{
    linux_kobject_kfree_name(&linux_class_root);
    linux_kobject_kfree_name(&linux_root_device.kobj);
    linux_kobject_kfree_name(&linux_class_misc.kobj);

    mtx_destroy(&vmmaplock);
    spin_lock_destroy(&pci_lock);
    rw_destroy(&linux_vma_lock);
}
SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
/*
 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
 * used. Assert these types have the same size, else some parts of the
 * LinuxKPI may not work like expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));