/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>
#include <machine/pmap.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/timer.h>
#include <linux/netdevice.h>

#include <vm/vm_pager.h>
66 MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
68 #include <linux/rbtree.h>
69 /* Undo Linux compat changes. */
73 #define RB_ROOT(head) (head)->rbh_root
75 struct kobject class_root;
76 struct device linux_rootdev;
77 struct class miscclass;
78 struct list_head pci_drivers;
79 struct list_head pci_devices;
83 unsigned long linux_timer_hz_mask;
/*
 * The compat rbtree code never compares nodes itself; a comparison
 * reaching this function indicates misuse, so panic loudly.
 */
static __inline int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
/*
 * Set a kobject's name from a printf-style format string.
 * Returns 0 on success or a negative errno from the vargs helper.
 */
int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}
107 kobject_add_complete(struct kobject *kobj, struct kobject *parent)
112 kobj->parent = kobject_get(parent);
113 error = sysfs_create_dir(kobj);
114 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
115 struct attribute **attr;
118 for (attr = t->default_attrs; *attr != NULL; attr++) {
119 error = sysfs_create_file(kobj, *attr);
124 sysfs_remove_dir(kobj);
/*
 * Name a kobject and add it under a parent in one step.
 */
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}
146 kobject_release(struct kref *kref)
148 struct kobject *kobj;
151 kobj = container_of(kref, struct kobject, kref);
152 sysfs_remove_dir(kobj);
154 kobject_put(kobj->parent);
157 if (kobj->ktype && kobj->ktype->release)
158 kobj->ktype->release(kobj);
/* Default release hook: the kobject was kmalloc'd, so just free it. */
static void
kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}
169 kobject_kfree_name(struct kobject *kobj)
176 struct kobj_type kfree_type = { .release = kobject_kfree };
/* Release hook for devices allocated by device_create(). */
static void
dev_release(struct device *dev)
{
	pr_debug("dev_release: %s\n", dev_name(dev));
	kfree(dev);
}
186 device_create(struct class *class, struct device *parent, dev_t devt,
187 void *drvdata, const char *fmt, ...)
192 dev = kzalloc(sizeof(*dev), M_WAITOK);
193 dev->parent = parent;
196 dev->driver_data = drvdata;
197 dev->release = dev_release;
199 kobject_set_name_vargs(&dev->kobj, fmt, args);
201 device_register(dev);
207 kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
208 struct kobject *parent, const char *fmt, ...)
213 kobject_init(kobj, ktype);
215 kobj->parent = parent;
219 error = kobject_set_name_vargs(kobj, fmt, args);
223 return kobject_add_complete(kobj, parent);
227 linux_file_dtor(void *cdp)
229 struct linux_file *filp;
232 filp->f_op->release(filp->f_vnode, filp);
233 vdrop(filp->f_vnode);
238 linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
240 struct linux_cdev *ldev;
241 struct linux_file *filp;
245 file = curthread->td_fpop;
249 filp = kzalloc(sizeof(*filp), GFP_KERNEL);
250 filp->f_dentry = &filp->f_dentry_store;
251 filp->f_op = ldev->ops;
252 filp->f_flags = file->f_flag;
253 vhold(file->f_vnode);
254 filp->f_vnode = file->f_vnode;
255 if (filp->f_op->open) {
256 error = -filp->f_op->open(file->f_vnode, filp);
262 error = devfs_set_cdevpriv(filp, linux_file_dtor);
264 filp->f_op->release(file->f_vnode, filp);
273 linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
275 struct linux_cdev *ldev;
276 struct linux_file *filp;
280 file = curthread->td_fpop;
284 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
286 filp->f_flags = file->f_flag;
287 devfs_clear_cdevpriv();
294 linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
297 struct linux_cdev *ldev;
298 struct linux_file *filp;
302 file = curthread->td_fpop;
306 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
308 filp->f_flags = file->f_flag;
310 * Linux does not have a generic ioctl copyin/copyout layer. All
311 * linux ioctls must be converted to void ioctls which pass a
312 * pointer to the address of the data. We want the actual user
313 * address so we dereference here.
315 data = *(void **)data;
316 if (filp->f_op->unlocked_ioctl)
317 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data);
325 linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag)
327 struct linux_cdev *ldev;
328 struct linux_file *filp;
333 file = curthread->td_fpop;
337 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
339 filp->f_flags = file->f_flag;
340 if (uio->uio_iovcnt != 1)
341 panic("linux_dev_read: uio %p iovcnt %d",
342 uio, uio->uio_iovcnt);
343 if (filp->f_op->read) {
344 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
345 uio->uio_iov->iov_len, &uio->uio_offset);
347 uio->uio_iov->iov_base += bytes;
348 uio->uio_iov->iov_len -= bytes;
349 uio->uio_resid -= bytes;
359 linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag)
361 struct linux_cdev *ldev;
362 struct linux_file *filp;
367 file = curthread->td_fpop;
371 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
373 filp->f_flags = file->f_flag;
374 if (uio->uio_iovcnt != 1)
375 panic("linux_dev_write: uio %p iovcnt %d",
376 uio, uio->uio_iovcnt);
377 if (filp->f_op->write) {
378 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base,
379 uio->uio_iov->iov_len, &uio->uio_offset);
381 uio->uio_iov->iov_base += bytes;
382 uio->uio_iov->iov_len -= bytes;
383 uio->uio_resid -= bytes;
393 linux_dev_poll(struct cdev *dev, int events, struct thread *td)
395 struct linux_cdev *ldev;
396 struct linux_file *filp;
401 file = curthread->td_fpop;
405 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
407 filp->f_flags = file->f_flag;
408 if (filp->f_op->poll)
409 revents = filp->f_op->poll(filp, NULL) & events;
417 linux_dev_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
418 int nprot, vm_memattr_t *memattr)
421 /* XXX memattr not honored. */
427 linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
428 vm_size_t size, struct vm_object **object, int nprot)
430 struct linux_cdev *ldev;
431 struct linux_file *filp;
433 struct vm_area_struct vma;
438 file = curthread->td_fpop;
442 if (size != PAGE_SIZE)
444 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0)
446 filp->f_flags = file->f_flag;
448 vma.vm_end = PAGE_SIZE;
449 vma.vm_pgoff = *offset / PAGE_SIZE;
451 vma.vm_page_prot = 0;
452 if (filp->f_op->mmap) {
453 error = -filp->f_op->mmap(filp, &vma);
455 paddr = (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT;
457 m = PHYS_TO_VM_PAGE(paddr);
458 *object = vm_pager_allocate(OBJT_DEVICE, dev,
459 PAGE_SIZE, nprot, *offset, curthread->td_ucred);
462 if (vma.vm_page_prot != VM_MEMATTR_DEFAULT)
463 pmap_page_set_memattr(m, vma.vm_page_prot);
471 struct cdevsw linuxcdevsw = {
472 .d_version = D_VERSION,
473 .d_flags = D_TRACKCLOSE,
474 .d_open = linux_dev_open,
475 .d_close = linux_dev_close,
476 .d_read = linux_dev_read,
477 .d_write = linux_dev_write,
478 .d_ioctl = linux_dev_ioctl,
479 .d_mmap_single = linux_dev_mmap_single,
480 .d_mmap = linux_dev_mmap,
481 .d_poll = linux_dev_poll,
485 linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
486 int flags, struct thread *td)
488 struct linux_file *filp;
493 filp = (struct linux_file *)file->f_data;
494 filp->f_flags = file->f_flag;
495 if (uio->uio_iovcnt != 1)
496 panic("linux_file_read: uio %p iovcnt %d",
497 uio, uio->uio_iovcnt);
498 if (filp->f_op->read) {
499 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base,
500 uio->uio_iov->iov_len, &uio->uio_offset);
502 uio->uio_iov->iov_base += bytes;
503 uio->uio_iov->iov_len -= bytes;
504 uio->uio_resid -= bytes;
514 linux_file_poll(struct file *file, int events, struct ucred *active_cred,
517 struct linux_file *filp;
520 filp = (struct linux_file *)file->f_data;
521 filp->f_flags = file->f_flag;
522 if (filp->f_op->poll)
523 revents = filp->f_op->poll(filp, NULL) & events;
531 linux_file_close(struct file *file, struct thread *td)
533 struct linux_file *filp;
536 filp = (struct linux_file *)file->f_data;
537 filp->f_flags = file->f_flag;
538 error = -filp->f_op->release(NULL, filp);
539 funsetown(&filp->f_sigio);
546 linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
549 struct linux_file *filp;
552 filp = (struct linux_file *)fp->f_data;
553 filp->f_flags = fp->f_flag;
560 if (filp->f_op->fasync == NULL)
562 error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC);
565 error = fsetown(*(int *)data, &filp->f_sigio);
567 error = filp->f_op->fasync(0, filp,
568 fp->f_flag & FASYNC);
571 *(int *)data = fgetown(&filp->f_sigio);
580 struct fileops linuxfileops = {
581 .fo_read = linux_file_read,
582 .fo_poll = linux_file_poll,
583 .fo_close = linux_file_close,
584 .fo_ioctl = linux_file_ioctl,
585 .fo_chmod = invfo_chmod,
586 .fo_chown = invfo_chown,
587 .fo_sendfile = invfo_sendfile,
591 * Hash of vmmap addresses. This is infrequently accessed and does not
592 * need to be particularly large. This is done because we must store the
593 * caller's idea of the map size to properly unmap.
596 LIST_ENTRY(vmmap) vm_next;
598 unsigned long vm_size;
602 struct vmmap *lh_first;
604 #define VMMAP_HASH_SIZE 64
605 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
606 #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
607 static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
608 static struct mtx vmmaplock;
611 vmmap_add(void *addr, unsigned long size)
615 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
616 mtx_lock(&vmmaplock);
617 vmmap->vm_size = size;
618 vmmap->vm_addr = addr;
619 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
620 mtx_unlock(&vmmaplock);
623 static struct vmmap *
624 vmmap_remove(void *addr)
628 mtx_lock(&vmmaplock);
629 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
630 if (vmmap->vm_addr == addr)
633 LIST_REMOVE(vmmap, vm_next);
634 mtx_unlock(&vmmaplock);
640 _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
644 addr = pmap_mapdev_attr(phys_addr, size, attr);
647 vmmap_add(addr, size);
657 vmmap = vmmap_remove(addr);
660 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
666 vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
671 size = count * PAGE_SIZE;
672 off = kva_alloc(size);
675 vmmap_add((void *)off, size);
676 pmap_qenter(off, pages, count);
678 return ((void *)off);
686 vmmap = vmmap_remove(addr);
689 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
690 kva_free((vm_offset_t)addr, vmmap->vm_size);
695 kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
702 len = vsnprintf(NULL, 0, fmt, aq);
705 p = kmalloc(len + 1, gfp);
707 vsnprintf(p, len + 1, fmt, ap);
713 kasprintf(gfp_t gfp, const char *fmt, ...)
719 p = kvasprintf(gfp, fmt, ap);
726 linux_timer_jiffies_until(unsigned long expires)
728 int delta = expires - jiffies;
729 /* guard against already expired values */
736 linux_timer_callback_wrapper(void *context)
738 struct timer_list *timer;
741 timer->function(timer->data);
745 mod_timer(struct timer_list *timer, unsigned long expires)
748 timer->expires = expires;
749 callout_reset(&timer->timer_callout,
750 linux_timer_jiffies_until(expires),
751 &linux_timer_callback_wrapper, timer);
755 add_timer(struct timer_list *timer)
758 callout_reset(&timer->timer_callout,
759 linux_timer_jiffies_until(timer->expires),
760 &linux_timer_callback_wrapper, timer);
764 linux_timer_init(void *arg)
768 * Compute an internal HZ value which can divide 2**32 to
769 * avoid timer rounding problems when the tick value wraps
772 linux_timer_hz_mask = 1;
773 while (linux_timer_hz_mask < (unsigned long)hz)
774 linux_timer_hz_mask *= 2;
775 linux_timer_hz_mask--;
777 SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
780 linux_complete_common(struct completion *c, int all)
787 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
789 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
796 * Indefinite wait for done != 0 with or without signals.
799 linux_wait_for_common(struct completion *c, int flags)
803 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
805 flags = SLEEPQ_SLEEP;
810 sleepq_add(c, NULL, "completion", flags, 0);
811 if (flags & SLEEPQ_INTERRUPTIBLE) {
812 if (sleepq_wait_sig(c, 0) != 0)
813 return (-ERESTARTSYS);
824 * Time limited wait for done != 0 with or without signals.
827 linux_wait_for_timeout_common(struct completion *c, long timeout, int flags)
829 long end = jiffies + timeout;
832 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
834 flags = SLEEPQ_SLEEP;
841 sleepq_add(c, NULL, "completion", flags, 0);
842 sleepq_set_timeout(c, linux_timer_jiffies_until(end));
843 if (flags & SLEEPQ_INTERRUPTIBLE)
844 ret = sleepq_timedwait_sig(c, 0);
846 ret = sleepq_timedwait(c, 0);
848 /* check for timeout or signal */
849 if (ret == EWOULDBLOCK)
852 return (-ERESTARTSYS);
858 /* return how many jiffies are left */
859 return (linux_timer_jiffies_until(end));
863 linux_try_wait_for_completion(struct completion *c)
878 linux_completion_done(struct completion *c)
891 linux_compat_init(void *arg)
893 struct sysctl_oid *rootoid;
896 rootoid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(),
897 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
898 kobject_init(&class_root, &class_ktype);
899 kobject_set_name(&class_root, "class");
900 class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
901 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
902 kobject_init(&linux_rootdev.kobj, &dev_ktype);
903 kobject_set_name(&linux_rootdev.kobj, "device");
904 linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL,
905 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
907 linux_rootdev.bsddev = root_bus;
908 miscclass.name = "misc";
909 class_register(&miscclass);
910 INIT_LIST_HEAD(&pci_drivers);
911 INIT_LIST_HEAD(&pci_devices);
912 spin_lock_init(&pci_lock);
913 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
914 for (i = 0; i < VMMAP_HASH_SIZE; i++)
915 LIST_INIT(&vmmaphead[i]);
918 SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
921 linux_compat_uninit(void *arg)
923 kobject_kfree_name(&class_root);
924 kobject_kfree_name(&linux_rootdev.kobj);
925 kobject_kfree_name(&miscclass.kobj);
927 SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);