2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/malloc.h>
33 #include <sys/kernel.h>
34 #include <sys/sysctl.h>
36 #include <sys/sglist.h>
37 #include <sys/sleepqueue.h>
39 #include <sys/mutex.h>
41 #include <sys/fcntl.h>
43 #include <sys/filio.h>
44 #include <sys/rwlock.h>
49 #include <machine/stdarg.h>
50 #include <machine/pmap.h>
52 #include <linux/kobject.h>
53 #include <linux/device.h>
54 #include <linux/slab.h>
55 #include <linux/module.h>
56 #include <linux/moduleparam.h>
57 #include <linux/cdev.h>
58 #include <linux/file.h>
59 #include <linux/sysfs.h>
62 #include <linux/vmalloc.h>
63 #include <linux/timer.h>
64 #include <linux/netdevice.h>
66 #include <vm/vm_pager.h>
68 #include <linux/workqueue.h>
/*
 * NOTE(review): this extract is a lossy sample of the original file; the
 * numeric prefix on each line is the original file's line number and many
 * intermediate lines are missing.  Code below is annotated, not altered.
 */
/* Root of the compat.linuxkpi sysctl tree for LinuxKPI tunables. */
70 SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");
/* malloc(9) type backing the Linux kmalloc()/kzalloc() compat allocators. */
72 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
74 #include <linux/rbtree.h>
75 /* Undo Linux compat changes. */
79 #define RB_ROOT(head) (head)->rbh_root
/*
 * Global roots of the emulated Linux object hierarchy: the "class" kobject,
 * the root device, the misc class, and the PCI driver/device lists.
 */
81 struct kobject class_root;
82 struct device linux_rootdev;
83 struct class miscclass;
84 struct list_head pci_drivers;
85 struct list_head pci_devices;
/* Power-of-two-minus-one mask derived from hz; see linux_timer_init(). */
88 unsigned long linux_timer_hz_mask;
/*
 * Comparison callback for the generated red-black tree operations.
 * NOTE(review): the body is not visible in this extract; the name suggests
 * it panics (rb insertions are expected to use their own comparators) --
 * confirm against the full source.
 */
91 panic_cmp(struct rb_node *one, struct rb_node *two)
/* Instantiate the RB-tree operations used by the Linux rbtree shim. */
96 RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
/*
 * Set a kobject's name from a printf-style format and va_list.
 * Sizes the string with a NUL-output vsnprintf, allocates with kzalloc,
 * formats, then post-filters the stored name (the filter loop body is not
 * visible in this extract; presumably it replaces '/' or spaces -- verify).
 * NOTE(review): intermediate lines (declarations, returns, error paths)
 * are missing from this extract.
 */
99 kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
/* Keep an existing name when no new format is supplied. */
109 if (old && fmt == NULL)
112 /* compute length of string */
/* va_copy so the measuring pass does not consume the caller's va_list. */
113 va_copy(tmp_va, args);
114 len = vsnprintf(&dummy, 0, fmt, tmp_va);
117 /* account for zero termination */
120 /* check for error */
124 /* allocate memory for string */
125 name = kzalloc(len, GFP_KERNEL);
/* Second pass actually formats into the freshly allocated buffer. */
128 vsnprintf(name, len, fmt, args);
131 /* free old string */
134 /* filter new string */
135 for (; *name != '\0'; name++)
/*
 * Convenience wrapper: varargs front-end for kobject_set_name_vargs().
 */
142 kobject_set_name(struct kobject *kobj, const char *fmt, ...)
148 error = kobject_set_name_vargs(kobj, fmt, args);
/*
 * Finish adding a kobject under a parent: take a reference on the parent,
 * create the sysfs directory, and populate it with the ktype's default
 * attributes.  On attribute-creation failure the directory is torn down
 * again (error propagation lines are missing from this extract).
 */
155 kobject_add_complete(struct kobject *kobj, struct kobject *parent)
160 kobj->parent = kobject_get(parent);
161 error = sysfs_create_dir(kobj);
162 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
163 struct attribute **attr;
/* NOTE(review): 't' is assigned from kobj->ktype on a line not shown here. */
166 for (attr = t->default_attrs; *attr != NULL; attr++) {
167 error = sysfs_create_file(kobj, *attr);
/* Roll back the sysfs directory if any default attribute failed. */
172 sysfs_remove_dir(kobj);
/*
 * Name the kobject from the format arguments, then complete the add.
 */
179 kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
185 error = kobject_set_name_vargs(kobj, fmt, args);
190 return kobject_add_complete(kobj, parent);
/*
 * kref release callback: tear down sysfs state, drop the parent reference,
 * and invoke the ktype's release hook (which typically frees the object).
 */
194 kobject_release(struct kref *kref)
196 struct kobject *kobj;
199 kobj = container_of(kref, struct kobject, kref);
200 sysfs_remove_dir(kobj);
/* Balance the kobject_get(parent) taken in kobject_add_complete(). */
202 kobject_put(kobj->parent);
205 if (kobj->ktype && kobj->ktype->release)
206 kobj->ktype->release(kobj);
/* Release hook that frees the kobject itself (body not shown in extract). */
211 kobject_kfree(struct kobject *kobj)
/* Helper to free only the kobject's dynamically allocated name. */
217 kobject_kfree_name(struct kobject *kobj)
/* ktype for heap-allocated kobjects: releasing them just kfrees them. */
224 struct kobj_type kfree_type = { .release = kobject_kfree };
/*
 * Release callback for devices created by device_create(); the visible
 * line only logs -- the freeing presumably happens on a line not shown.
 */
227 dev_release(struct device *dev)
229 pr_debug("dev_release: %s\n", dev_name(dev));
/*
 * Linux device_create() emulation: allocate a struct device, wire up the
 * class/parent/devt/driver data (class and devt assignments are on lines
 * missing from this extract), name it from the varargs, and register it.
 */
234 device_create(struct class *class, struct device *parent, dev_t devt,
235 void *drvdata, const char *fmt, ...)
240 dev = kzalloc(sizeof(*dev), M_WAITOK);
241 dev->parent = parent;
244 dev->driver_data = drvdata;
245 dev->release = dev_release;
247 kobject_set_name_vargs(&dev->kobj, fmt, args);
249 device_register(dev);
/*
 * Initialize a kobject with the given ktype, name it from the format
 * arguments, and add it under 'parent' in one call -- mirrors the Linux
 * kobject_init_and_add() API.
 */
255 kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
256 struct kobject *parent, const char *fmt, ...)
261 kobject_init(kobj, ktype);
263 kobj->parent = parent;
267 error = kobject_set_name_vargs(kobj, fmt, args);
271 return kobject_add_complete(kobj, parent);
/*
 * devfs cdevpriv destructor: invoked when the per-open private data is
 * destroyed.  Calls the Linux release() fop and drops the vnode hold taken
 * at open time.  (The cast of 'cdp' to filp is on a line not shown here.)
 */
275 linux_file_dtor(void *cdp)
277 struct linux_file *filp;
280 filp->f_op->release(filp->f_vnode, filp);
281 vdrop(filp->f_vnode);
/*
 * cdevsw d_open handler: builds a struct linux_file around the current
 * thread's file (td_fpop), holds the vnode, invokes the Linux open() fop
 * (Linux returns negative errno, hence the negation), and stores the filp
 * as devfs per-open private data with linux_file_dtor as destructor.
 * NOTE(review): the lookup that initializes 'ldev' (presumably from
 * dev->si_drv1) is on lines missing from this extract.
 */
286 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
288 struct linux_cdev *ldev;
289 struct linux_file *filp;
293 file = curthread->td_fpop;
297 filp = kzalloc(sizeof(*filp), GFP_KERNEL);
298 filp->f_dentry = &filp->f_dentry_store;
299 filp->f_op = ldev->ops;
300 filp->f_flags = file->f_flag;
/* Hold the vnode across the open lifetime; released in linux_file_dtor(). */
301 vhold(file->f_vnode);
302 filp->f_vnode = file->f_vnode;
303 if (filp->f_op->open) {
/* Linux fops return -errno; flip the sign for FreeBSD conventions. */
304 error = -filp->f_op->open(file->f_vnode, filp);
310 error = devfs_set_cdevpriv(filp, linux_file_dtor);
/* If cdevpriv registration failed, undo the Linux-side open. */
312 filp->f_op->release(file->f_vnode, filp);
/*
 * cdevsw d_close handler: looks up the per-open filp, refreshes f_flags
 * from the FreeBSD file, and clears cdevpriv -- which triggers
 * linux_file_dtor() to run the Linux release() fop.
 */
321 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
323 struct linux_cdev *ldev;
324 struct linux_file *filp;
328 file = curthread->td_fpop;
332 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
334 filp->f_flags = file->f_flag;
/* Dropping cdevpriv invokes linux_file_dtor() for this open. */
335 devfs_clear_cdevpriv();
/*
 * cdevsw d_ioctl handler: forwards to the Linux unlocked_ioctl fop.
 * Linux drivers do their own copyin/copyout, so the cmd must be declared
 * with no data movement and 'data' holds a pointer to the user address.
 */
342 linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
345 struct linux_cdev *ldev;
346 struct linux_file *filp;
350 file = curthread->td_fpop;
354 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
356 filp->f_flags = file->f_flag;
358 * Linux does not have a generic ioctl copyin/copyout layer. All
359 * linux ioctls must be converted to void ioctls which pass a
360 * pointer to the address of the data. We want the actual user
361 * address so we dereference here.
363 data = *(void **)data;
364 if (filp->f_op->unlocked_ioctl)
/* Negate: Linux fops return -errno. */
365 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
/*
 * cdevsw d_read handler: translates a single-iovec uio into a Linux
 * read() fop call, then advances the iovec/resid by the byte count the
 * driver reported.  Only one iovec is supported (Linux read fops take a
 * single buffer), hence the panic on iovcnt != 1.
 */
373 linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
375 struct linux_cdev *ldev;
376 struct linux_file *filp;
381 file = curthread->td_fpop;
385 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
387 filp->f_flags = file->f_flag;
388 if (uio->uio_iovcnt != 1)
389 panic("linux_dev_read: uio %p iovcnt %d",
390 uio, uio->uio_iovcnt);
391 if (filp->f_op->read) {
392 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
393 uio->uio_iov->iov_len, &uio->uio_offset);
/* Advance the uio by what the Linux driver consumed/produced. */
395 uio->uio_iov->iov_base += bytes;
396 uio->uio_iov->iov_len -= bytes;
397 uio->uio_resid -= bytes;
/*
 * cdevsw d_write handler: mirror image of linux_dev_read() using the
 * Linux write() fop; same single-iovec restriction and uio bookkeeping.
 */
407 linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
409 struct linux_cdev *ldev;
410 struct linux_file *filp;
415 file = curthread->td_fpop;
419 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
421 filp->f_flags = file->f_flag;
422 if (uio->uio_iovcnt != 1)
423 panic("linux_dev_write: uio %p iovcnt %d",
424 uio, uio->uio_iovcnt);
425 if (filp->f_op->write) {
426 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
427 uio->uio_iov->iov_len, &uio->uio_offset);
429 uio->uio_iov->iov_base += bytes;
430 uio->uio_iov->iov_len -= bytes;
431 uio->uio_resid -= bytes;
/*
 * cdevsw d_poll handler: forwards to the Linux poll() fop (with a NULL
 * poll_table, so drivers cannot register wait queues here) and masks the
 * result with the requested events.
 */
441 linux_dev_poll(struct cdev *dev, int events, struct thread *td)
443 struct linux_cdev *ldev;
444 struct linux_file *filp;
449 file = curthread->td_fpop;
453 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
455 filp->f_flags = file->f_flag;
456 if (filp->f_op->poll)
457 revents = filp->f_op->poll(filp, NULL) & events;
/*
 * cdevsw d_mmap_single handler: fakes up a Linux vm_area_struct, asks the
 * driver's mmap() fop to fill in the physical range (vm_pfn/vm_len), then
 * wraps that single physical segment in an sglist and exposes it as an
 * OBJT_SG VM object.  If the driver changed vm_page_prot, the memory
 * attribute is applied to the object under the object write lock.
 * NOTE(review): only a single contiguous segment is supported (sglist of
 * size 1); error/cleanup lines are missing from this extract.
 */
465 linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
466 vm_size_t size, struct vm_object **object, int nprot)
468 struct linux_cdev *ldev;
469 struct linux_file *filp;
471 struct vm_area_struct vma;
474 file = curthread->td_fpop;
478 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
480 filp->f_flags = file->f_flag;
/* Present the mapping request to the driver in Linux terms (page offset). */
483 vma.vm_pgoff = *offset / PAGE_SIZE;
485 vma.vm_page_prot = VM_MEMATTR_DEFAULT;
486 if (filp->f_op->mmap) {
487 error = -filp->f_op->mmap(filp, &vma);
491 sg = sglist_alloc(1, M_WAITOK);
492 sglist_append_phys(sg,
493 (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len);
494 *object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len,
495 nprot, 0, curthread->td_ucred);
496 if (*object == NULL) {
/* Honor a non-default memory attribute requested by the driver. */
501 if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) {
502 VM_OBJECT_WLOCK(*object);
503 vm_object_set_memattr(*object,
505 VM_OBJECT_WUNLOCK(*object);
/*
 * Character-device switch routing devfs operations on Linux-style cdevs to
 * the shim handlers above.  D_TRACKCLOSE ensures d_close is called for
 * every open, matching Linux release() semantics.
 */
514 struct cdevsw linuxcdevsw = {
515 .d_version = D_VERSION,
516 .d_flags = D_TRACKCLOSE,
517 .d_open = linux_dev_open,
518 .d_close = linux_dev_close,
519 .d_read = linux_dev_read,
520 .d_write = linux_dev_write,
521 .d_ioctl = linux_dev_ioctl,
522 .d_mmap_single = linux_dev_mmap_single,
523 .d_poll = linux_dev_poll,
/*
 * fileops fo_read handler for Linux files exposed through struct file
 * (f_data holds the linux_file).  Same single-iovec translation to the
 * Linux read() fop as linux_dev_read(), including the uio advance.
 */
527 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
528 int flags, struct thread *td)
530 struct linux_file *filp;
535 filp = (struct linux_file *)file->f_data;
536 filp->f_flags = file->f_flag;
537 if (uio->uio_iovcnt != 1)
538 panic("linux_file_read: uio %p iovcnt %d",
539 uio, uio->uio_iovcnt);
540 if (filp->f_op->read) {
541 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
542 uio->uio_iov->iov_len, &uio->uio_offset);
/* Advance the uio by the byte count the Linux driver reported. */
544 uio->uio_iov->iov_base += bytes;
545 uio->uio_iov->iov_len -= bytes;
546 uio->uio_resid -= bytes;
/*
 * fileops fo_poll handler: forward to the Linux poll() fop (NULL
 * poll_table) and mask with the requested events.
 */
556 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
559 struct linux_file *filp;
562 filp = (struct linux_file *)file->f_data;
563 filp->f_flags = file->f_flag;
564 if (filp->f_op->poll)
565 revents = filp->f_op->poll(filp, NULL) & events;
/*
 * fileops fo_close handler: run the Linux release() fop (note: with a NULL
 * vnode/inode argument, unlike the cdev path) and drop SIGIO ownership.
 */
573 linux_file_close(struct file *file, struct thread *td)
575 struct linux_file *filp;
578 filp = (struct linux_file *)file->f_data;
579 filp->f_flags = file->f_flag;
580 error = -filp->f_op->release(NULL, filp);
581 funsetown(&filp->f_sigio);
/*
 * fileops fo_ioctl handler.  The visible cases implement async-I/O
 * ownership: FASYNC-style enable via the Linux fasync() fop, FIOSETOWN /
 * FIOGETOWN via fsetown()/fgetown().  The switch statement and remaining
 * cases (including the pass-through to unlocked_ioctl) are on lines
 * missing from this extract.
 */
588 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
591 struct linux_file *filp;
594 filp = (struct linux_file *)fp->f_data;
595 filp->f_flags = fp->f_flag;
602 if (filp->f_op->fasync == NULL)
604 error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
/* FIOSETOWN: record the SIGIO recipient, then notify the driver. */
607 error = fsetown(*(int *)data, &filp->f_sigio);
609 error = filp->f_op->fasync(0, filp,
610 fp->f_flag & FASYNC);
/* FIOGETOWN: report the current SIGIO owner. */
613 *(int *)data = fgetown(&filp->f_sigio);
/*
 * fileops table for Linux files wrapped in a FreeBSD struct file.
 * Unsupported operations (chmod/chown/sendfile) return EINVAL via the
 * invfo_* stubs.
 */
622 struct fileops linuxfileops = {
623 .fo_read = linux_file_read,
624 .fo_poll = linux_file_poll,
625 .fo_close = linux_file_close,
626 .fo_ioctl = linux_file_ioctl,
627 .fo_chmod = invfo_chmod,
628 .fo_chown = invfo_chown,
629 .fo_sendfile = invfo_sendfile,
/*
 * Bookkeeping for ioremap()/vmap() so the matching unmap can recover the
 * mapping size from just the address.
 */
633 * Hash of vmmap addresses. This is infrequently accessed and does not
634 * need to be particularly large. This is done because we must store the
635 * caller's idea of the map size to properly unmap.
/* Hash-chain linkage and recorded size for one mapping entry. */
638 LIST_ENTRY(vmmap) vm_next;
640 unsigned long vm_size;
644 struct vmmap *lh_first;
646 #define VMMAP_HASH_SIZE 64
647 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
/* Hash on the page number of the address; table size is a power of two. */
648 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
649 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
/* Protects all vmmaphead chains. */
650 static struct mtx vmmaplock;
/*
 * Record a new address->size mapping in the vmmap hash.
 * Note the kmalloc(GFP_KERNEL) is done before taking the lock, so the
 * allocation may sleep without holding vmmaplock.
 */
653 vmmap_add(void *addr, unsigned long size)
657 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
658 mtx_lock(&vmmaplock);
659 vmmap->vm_size = size;
660 vmmap->vm_addr = addr;
661 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
662 mtx_unlock(&vmmaplock);
/*
 * Look up and unlink the entry for 'addr'; the caller frees it and uses
 * vm_size to size the unmap.  Returns NULL if not found (return lines are
 * missing from this extract).
 */
665 static struct vmmap *
666 vmmap_remove(void *addr)
670 mtx_lock(&vmmaplock);
671 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
672 if (vmmap->vm_addr == addr)
675 LIST_REMOVE(vmmap, vm_next);
676 mtx_unlock(&vmmaplock);
/*
 * ioremap() backend: map a physical range with the requested memory
 * attribute via pmap_mapdev_attr() and record the size for iounmap().
 */
682 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
686 addr = pmap_mapdev_attr(phys_addr, size, attr);
689 vmmap_add(addr, size);
/*
 * NOTE(review): the following two lines are the interior of the iounmap()
 * counterpart -- its signature is on lines missing from this extract.
 * It recovers the size from the vmmap hash and undoes the device mapping.
 */
699 vmmap = vmmap_remove(addr);
702 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
/*
 * vmap(): allocate KVA for 'count' pages, record it in the vmmap hash,
 * and enter the pages with pmap_qenter().  'flags'/'prot' are accepted for
 * Linux API compatibility (their use is not visible in this extract).
 */
708 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
713 size = count * PAGE_SIZE;
714 off = kva_alloc(size);
717 vmmap_add((void *)off, size);
718 pmap_qenter(off, pages, count);
720 return ((void *)off);
/*
 * NOTE(review): interior of the vunmap() counterpart (signature missing
 * from this extract): remove the pages and free the KVA using the size
 * recorded by vmap().
 */
728 vmmap = vmmap_remove(addr);
731 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
732 kva_free((vm_offset_t)addr, vmmap->vm_size);
/*
 * kvasprintf(): allocate-and-format.  Measures the formatted length with a
 * NULL-buffer vsnprintf on a copy of the va_list ('aq' -- the va_copy is on
 * a line missing from this extract), then allocates len+1 and formats.
 */
737 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
744 len = vsnprintf(NULL, 0, fmt, aq);
747 p = kmalloc(len + 1, gfp);
749 vsnprintf(p, len + 1, fmt, ap);
/*
 * kasprintf(): varargs front-end for kvasprintf().
 */
755 kasprintf(gfp_t gfp, const char *fmt, ...)
761 p = kvasprintf(gfp, fmt, ap);
/*
 * Convert an absolute 'expires' jiffies value into a relative tick count
 * for callout_reset(); the signed subtraction makes already-expired values
 * come out negative so they can be clamped (clamp lines not shown here).
 */
768 linux_timer_jiffies_until(unsigned long expires)
770 int delta = expires - jiffies;
771 /* guard against already expired values */
/*
 * callout(9) trampoline: recover the timer_list from the callout argument
 * and invoke its Linux-style function(data) callback.
 */
778 linux_timer_callback_wrapper(void *context)
780 struct timer_list *timer;
783 timer->function(timer->data);
/*
 * mod_timer(): (re)arm the timer for a new absolute expiry.
 */
787 mod_timer(struct timer_list *timer, unsigned long expires)
790 timer->expires = expires;
791 callout_reset(&timer->timer_callout,
792 linux_timer_jiffies_until(expires),
793 &linux_timer_callback_wrapper, timer);
/*
 * add_timer(): arm the timer using the expiry already stored in it.
 */
797 add_timer(struct timer_list *timer)
800 callout_reset(&timer->timer_callout,
801 linux_timer_jiffies_until(timer->expires),
802 &linux_timer_callback_wrapper, timer);
/*
 * SYSINIT hook: derive linux_timer_hz_mask as (next power of two >= hz) - 1
 * so jiffies arithmetic can mask instead of divide without rounding error
 * at tick wrap-around.
 */
806 linux_timer_init(void *arg)
810 * Compute an internal HZ value which can divide 2**32 to
811 * avoid timer rounding problems when the tick value wraps
814 linux_timer_hz_mask = 1;
815 while (linux_timer_hz_mask < (unsigned long)hz)
816 linux_timer_hz_mask *= 2;
817 linux_timer_hz_mask--;
819 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
/*
 * Shared body for complete()/complete_all(): wake either every waiter or a
 * single waiter on the completion's sleepqueue(9) channel.  The done-count
 * update and sleepq locking are on lines missing from this extract.
 */
822 linux_complete_common(struct completion *c, int all)
829 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
831 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
838 * Indefinite wait for done != 0 with or without signals.
/*
 * wait_for_completion[_interruptible]() backend: sleep on the completion's
 * sleepqueue until it is completed; interruptible sleeps return
 * -ERESTARTSYS when broken by a signal.
 */
841 linux_wait_for_common(struct completion *c, int flags)
845 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
847 flags = SLEEPQ_SLEEP;
852 sleepq_add(c, NULL, "completion", flags, 0);
853 if (flags & SLEEPQ_INTERRUPTIBLE) {
854 if (sleepq_wait_sig(c, 0) != 0)
855 return (-ERESTARTSYS);
866 * Time limited wait for done != 0 with or without signals.
/*
 * wait_for_completion_timeout() backend: same as above but bounded by a
 * sleepq timeout.  EWOULDBLOCK from the sleepq means timeout; other
 * nonzero means a signal (-ERESTARTSYS); on success return the remaining
 * jiffies.
 */
869 linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
871 long end = jiffies + timeout;
874 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
876 flags = SLEEPQ_SLEEP;
883 sleepq_add(c, NULL, "completion", flags, 0);
884 sleepq_set_timeout(c, linux_timer_jiffies_until(end));
885 if (flags & SLEEPQ_INTERRUPTIBLE)
886 ret = sleepq_timedwait_sig(c, 0);
888 ret = sleepq_timedwait(c, 0);
890 /* check for timeout or signal */
891 if (ret == EWOULDBLOCK)
894 return (-ERESTARTSYS);
900 /* return how many jiffies are left */
901 return (linux_timer_jiffies_until(end));
/* Non-blocking try_wait_for_completion(); body not shown in this extract. */
905 linux_try_wait_for_completion(struct completion *c)
/* completion_done() predicate; body not shown in this extract. */
920 linux_completion_done(struct completion *c)
/*
 * Callout handler for delayed work: when the delay expires, enqueue the
 * embedded work item on its taskqueue.
 */
933 linux_delayed_work_fn(void *arg)
935 struct delayed_work *work;
938 taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
/*
 * taskqueue(9) trampoline that runs a work_struct's func; the recovery of
 * 'work' from context and the call are partly on lines not shown here.
 */
942 linux_work_fn(void *context, int pending)
944 struct work_struct *work;
/* No-op barrier task used by flush_workqueue()-style draining. */
951 linux_flush_fn(void *context, int pending)
/*
 * Backend for create_workqueue()/create_singlethread_workqueue(): build a
 * taskqueue with 'cpus' threads named after the workqueue.
 */
955 struct workqueue_struct *
956 linux_create_workqueue_common(const char *name, int cpus)
958 struct workqueue_struct *wq;
960 wq = kmalloc(sizeof(*wq), M_WAITOK);
961 wq->taskqueue = taskqueue_create(name, M_WAITOK,
962 taskqueue_thread_enqueue, &wq->taskqueue);
963 atomic_set(&wq->draining, 0);
964 taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
/*
 * destroy_workqueue(): free the backing taskqueue (the kfree of wq itself
 * is on a line missing from this extract -- verify against full source).
 */
970 destroy_workqueue(struct workqueue_struct *wq)
972 taskqueue_free(wq->taskqueue);
/*
 * SYSINIT hook: build the emulated /sys hierarchy (sys, sys/class,
 * sys/device sysctl nodes backed by kobjects), register the misc class,
 * initialize the PCI driver/device lists and lock, and set up the vmmap
 * hash used by ioremap()/vmap().
 */
977 linux_compat_init(void *arg)
979 struct sysctl_oid *rootoid;
/* Top-level "sys" node mirrors Linux's /sys. */
982 rootoid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(),
983 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
984 kobject_init(&class_root, &class_ktype);
985 kobject_set_name(&class_root, "class");
986 class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
987 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
988 kobject_init(&linux_rootdev.kobj, &dev_ktype);
989 kobject_set_name(&linux_rootdev.kobj, "device");
990 linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
991 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
/* Anchor the Linux root device on newbus's root bus. */
993 linux_rootdev.bsddev = root_bus;
994 miscclass.name = "misc";
995 class_register(&miscclass);
996 INIT_LIST_HEAD(&pci_drivers);
997 INIT_LIST_HEAD(&pci_devices);
998 spin_lock_init(&pci_lock);
/* vmmap hash: lock plus empty chains; see vmmap_add()/vmmap_remove(). */
999 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
1000 for (i = 0; i < VMMAP_HASH_SIZE; i++)
1001 LIST_INIT(&vmmaphead[i]);
1004 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
/*
 * SYSUNINIT hook: release the dynamically allocated names of the global
 * kobjects created in linux_compat_init().
 */
1007 linux_compat_uninit(void *arg)
1009 kobject_kfree_name(&class_root);
1010 kobject_kfree_name(&linux_rootdev.kobj);
1011 kobject_kfree_name(&miscclass.kobj);
1013 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);