/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/
#include <sys/vnode.h>
#include <sys/kmem_cache.h>
#include <linux/falloc.h>
#include <linux/file_compat.h>

vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);
vn_mode_to_vtype(mode_t mode)
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);
vn_vtype_to_mode(vtype_t vtype)
} /* vn_vtype_to_mode() */
EXPORT_SYMBOL(vn_vtype_to_mode);
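/*
 * Illustrative sketch, not part of the SPL itself: the kind of mapping
 * vn_mode_to_vtype() performs, assuming the standard S_IS*() macros from
 * <linux/stat.h> and the Solaris vtype_t constants from <sys/vnode.h>.
 * The helper name is hypothetical.
 */
static inline vtype_t
spl_example_mode_to_vtype(mode_t mode)
{
        if (S_ISREG(mode))
                return (VREG);          /* regular file */
        if (S_ISDIR(mode))
                return (VDIR);          /* directory */
        if (S_ISCHR(mode))
                return (VCHR);          /* character device */
        if (S_ISBLK(mode))
                return (VBLK);          /* block device */
        if (S_ISFIFO(mode))
                return (VFIFO);         /* named pipe */
        if (S_ISLNK(mode))
                return (VLNK);          /* symbolic link */
        if (S_ISSOCK(mode))
                return (VSOCK);         /* socket */

        return (VNON);                  /* unknown / none */
}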
        vp = kmem_cache_alloc(vn_cache, flag);
EXPORT_SYMBOL(vn_alloc);

        kmem_cache_free(vn_cache, vp);
EXPORT_SYMBOL(vn_free);
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
        vnode_t **vpp, int x1, void *x2)

        int rc, saved_umask = 0;

        ASSERT(flags & (FWRITE | FREAD));
        ASSERT(seg == UIO_SYSSPACE);

        if (!(flags & FCREAT) && (flags & FWRITE))

        /* Note for filp_open() the two low bits must be remapped to mean:
         * 01 - read-only  -> 00 read-only
         * 10 - write-only -> 01 write-only
         * 11 - read-write -> 10 read-write
         * (An illustrative sketch of this remapping follows vn_open() below.)
         */
        flags--;

        if (flags & FCREAT)
                saved_umask = xchg(&current->fs->umask, 0);

        fp = filp_open(path, flags, mode);

        if (flags & FCREAT)
                (void)xchg(&current->fs->umask, saved_umask);

        if (IS_ERR(fp))
                return (-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&fp->f_path, &stat);
#else
        rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif /* HAVE_2ARGS_VFS_GETATTR */

        vp = vn_alloc(KM_SLEEP);

        saved_gfp = mapping_gfp_mask(fp->f_mapping);
        mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_mode_to_vtype(stat.mode);
        vp->v_gfp_mask = saved_gfp;
        mutex_exit(&vp->v_lock);

EXPORT_SYMBOL(vn_open);
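/*
 * Illustrative sketch, not part of the SPL itself: one way to express the
 * access-mode remapping described in the comment inside vn_open() above,
 * assuming the Solaris FREAD/FWRITE bits (0x1/0x2) and the Linux
 * O_RDONLY/O_WRONLY/O_RDWR values (0/1/2).  The helper name is hypothetical.
 */
static inline int
spl_example_remap_access_mode(int flags)
{
        /*
         * The low two bits are guaranteed non-zero (FREAD and/or FWRITE),
         * so decrementing the word lowers only those two bits and leaves
         * every other flag untouched:
         *   FREAD        (01) -> O_RDONLY (00)
         *   FWRITE       (10) -> O_WRONLY (01)
         *   FREAD|FWRITE (11) -> O_RDWR   (10)
         */
        ASSERT(flags & (FREAD | FWRITE));
        return (flags - 1);
}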
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
        vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)

        ASSERT(vp == rootdir);

        len = strlen(path) + 2;
        realpath = kmalloc(len, kmem_flags_convert(KM_SLEEP));

        (void)snprintf(realpath, len, "/%s", path);
        rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);

EXPORT_SYMBOL(vn_openat);
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
        uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)

        mm_segment_t saved_fs;

        ASSERT(uio == UIO_WRITE || uio == UIO_READ);
        ASSERT(seg == UIO_SYSSPACE);
        ASSERT((ioflag & ~FAPPEND) == 0);

        if (ioflag & FAPPEND)

        /* Writable user data segment must be briefly increased for this
         * process so we can use the user space read call paths to write
         * into memory allocated by the kernel.  (An illustrative sketch of
         * this pattern follows vn_rdwr() below.) */

        rc = vfs_write(fp, addr, len, &offset);

        rc = vfs_read(fp, addr, len, &offset);

EXPORT_SYMBOL(vn_rdwr);
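/*
 * Illustrative sketch, not part of the SPL itself: the classic pattern for
 * letting vfs_write() accept a kernel buffer, as described in the comment
 * inside vn_rdwr() above.  get_fs()/set_fs() and KERNEL_DS are the stock
 * primitives on the kernels this file targets; the helper name is
 * hypothetical.
 */
static ssize_t
spl_example_kernel_write(struct file *fp, const void *buf, size_t len,
    loff_t *off)
{
        mm_segment_t saved_fs;
        ssize_t rc;

        saved_fs = get_fs();    /* remember the current address limit */
        set_fs(KERNEL_DS);      /* temporarily allow kernel addresses */

        rc = vfs_write(fp, (const char __user *)buf, len, off);

        set_fs(saved_fs);       /* always restore the saved limit */

        return (rc);
}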
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)

        mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
        rc = filp_close(vp->v_file, 0);

EXPORT_SYMBOL(vn_close);
/* vn_seek() does not actually seek; it only performs bounds checking on the
 * proposed seek.  We perform minimal checking and allow vn_rdwr() to catch
 * anything more serious. */
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
        return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
EXPORT_SYMBOL(vn_seek);
/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
 * basename of the path.  If the basename is not "." or "/", it will be an
 * index into the string.  While the string should be NULL terminated, the
 * section referring to the basename is not.  spl_basename is dual-licensed
 * GPLv2+ and CC0.  Anyone wishing to reuse it in another codebase may pick
 * either license.  (An illustrative usage sketch follows the function below.)
 */
spl_basename(const char *s, const char **str, int *len)

        while (i && s[i--] == '/');

        for (end = i; i; i--) {
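/*
 * Illustrative usage, not part of the SPL itself, assuming the behaviour
 * described in the comment above spl_basename(): the returned pointer
 * indexes into the caller's string, the reported length excludes trailing
 * slashes, and the basename itself is not NULL terminated.  The helper name
 * and paths are hypothetical.
 */
static void
spl_example_basename_usage(void)
{
        const char *base;
        int len;

        spl_basename("/usr/lib/libfoo.so", &base, &len);
        /* base points at "libfoo.so" within the input, len == 9 */

        spl_basename("/usr/lib/", &base, &len);
        /* trailing slashes are skipped: base points at "lib", len == 3 */
}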
static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)

        struct dentry *dentry;
        const char *basename;

        spl_basename(name, &basename, &len);

        /* We do not accept "." or ".." */
        if (len <= 2 && basename[0] == '.')
                if (len == 1 || basename[1] == '.')
                        return (ERR_PTR(-EACCES));

        rc = kern_path(name, LOOKUP_PARENT, &parent);
        if (rc)
                return (ERR_PTR(rc));

        /* use I_MUTEX_PARENT because vfs_unlink needs it */
        spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

        dentry = lookup_one_len(basename, parent.dentry, len);
        if (IS_ERR(dentry)) {
                spl_inode_unlock(parent.dentry->d_inode);
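/*
 * Illustrative sketch, not part of the SPL itself, of the calling convention
 * assumed by the users of spl_kern_path_locked() below: on success the parent
 * directory inode is returned locked, so the caller is expected to drop the
 * dentry reference, the parent inode lock, and the path reference when it is
 * done (as vn_remove() and vn_rename() do).  The helper name is hypothetical.
 */
static int
spl_example_with_parent_locked(const char *name)
{
        struct dentry *dentry;
        struct path parent;
        int rc = 0;

        dentry = spl_kern_path_locked(name, &parent);
        if (IS_ERR(dentry))
                return (PTR_ERR(dentry));

        /* ... operate on dentry while parent.dentry->d_inode is held ... */

        dput(dentry);
        spl_inode_unlock(parent.dentry->d_inode);
        path_put(&parent);

        return (rc);
}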
/* Based on do_unlinkat() from linux/fs/namei.c */
vn_remove(const char *path, uio_seg_t seg, int flags)

        struct dentry *dentry;
        struct inode *inode = NULL;

        ASSERT(seg == UIO_SYSSPACE);
        ASSERT(flags == RMFILE);

        dentry = spl_kern_path_locked(path, &parent);
        rc = PTR_ERR(dentry);
        if (!IS_ERR(dentry)) {
                if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {

                inode = dentry->d_inode;
                atomic_inc(&inode->i_count);

#ifdef HAVE_2ARGS_VFS_UNLINK
                rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
                rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */

        spl_inode_unlock(parent.dentry->d_inode);
        iput(inode); /* truncate the inode here */

        rc = !dentry->d_inode ? -ENOENT :
            S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;

EXPORT_SYMBOL(vn_remove);
/* Based on do_rename() from linux/fs/namei.c */
vn_rename(const char *oldname, const char *newname, int x1)

        struct dentry *old_dir, *new_dir;
        struct dentry *old_dentry, *new_dentry;
        struct path old_parent, new_parent;

        old_dentry = spl_kern_path_locked(oldname, &old_parent);
        if (IS_ERR(old_dentry)) {
                rc = PTR_ERR(old_dentry);

        spl_inode_unlock(old_parent.dentry->d_inode);

        new_dentry = spl_kern_path_locked(newname, &new_parent);
        if (IS_ERR(new_dentry)) {
                rc = PTR_ERR(new_dentry);

        spl_inode_unlock(new_parent.dentry->d_inode);

        if (old_parent.mnt != new_parent.mnt)

        old_dir = old_parent.dentry;
        new_dir = new_parent.dentry;
        trap = lock_rename(new_dir, old_dir);

        /* source should not be ancestor of target */
        if (old_dentry == trap)

        /* target should not be an ancestor of source */
        if (new_dentry == trap)

        /* source must exist */
        if (!old_dentry->d_inode)

        /* unless the source is a directory trailing slashes give -ENOTDIR */
        if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
                if (old_dentry->d_name.name[old_dentry->d_name.len])
                if (new_dentry->d_name.name[new_dentry->d_name.len])

#if defined(HAVE_4ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL);
#else
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL, 0);
#endif

        unlock_rename(new_dir, old_dir);

        path_put(&new_parent);

        path_put(&old_parent);

EXPORT_SYMBOL(vn_rename);
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&fp->f_path, &stat);
#else
        rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif /* HAVE_2ARGS_VFS_GETATTR */

        vap->va_type = vn_mode_to_vtype(stat.mode);
        vap->va_mode = stat.mode;
        vap->va_uid = KUID_TO_SUID(stat.uid);
        vap->va_gid = KGID_TO_SGID(stat.gid);
        vap->va_nodeid = stat.ino;
        vap->va_nlink = stat.nlink;
        vap->va_size = stat.size;
        vap->va_blksize = stat.blksize;
        vap->va_atime = stat.atime;
        vap->va_mtime = stat.mtime;
        vap->va_ctime = stat.ctime;
        vap->va_rdev = stat.rdev;
        vap->va_nblocks = stat.blocks;

EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)

        /*
         * May enter XFS which generates a warning when PF_FSTRANS is set.
         * To avoid this the flag is cleared over vfs_sync() and then reset.
         * (An illustrative sketch of this pattern follows vn_fsync() below.)
         */
        fstrans = spl_fstrans_check();
        if (fstrans)
                current->flags &= ~(PF_FSTRANS);

        error = -spl_filp_fsync(vp->v_file, datasync);
        if (fstrans)
                current->flags |= PF_FSTRANS;

EXPORT_SYMBOL(vn_fsync);
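/*
 * Illustrative sketch, not part of the SPL itself, of the PF_FSTRANS
 * save/clear/restore pattern referenced in the comments in vn_fsync() and
 * vn_space(): the bit is dropped only for the duration of the VFS call and
 * only restored if it was set on entry.  The helper name is hypothetical.
 */
static int
spl_example_fsync_without_fstrans(struct file *fp, int datasync)
{
        int fstrans, error;

        fstrans = spl_fstrans_check();          /* was PF_FSTRANS set? */
        if (fstrans)
                current->flags &= ~(PF_FSTRANS);

        error = -spl_filp_fsync(fp, datasync);

        if (fstrans)
                current->flags |= PF_FSTRANS;   /* restore caller's state */

        return (error);
}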
int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
        offset_t offset, void *x6, void *x7)

        int error = EOPNOTSUPP;
#ifdef FALLOC_FL_PUNCH_HOLE

        if (cmd != F_FREESP || bfp->l_whence != 0)

        ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
        /*
         * May enter XFS which generates a warning when PF_FSTRANS is set.
         * To avoid this the flag is cleared over vfs_sync() and then reset.
         */
        fstrans = spl_fstrans_check();
        if (fstrans)
                current->flags &= ~(PF_FSTRANS);

        /*
         * When supported by the underlying file system preferentially
         * use the fallocate() callback to preallocate the space.
         * (An illustrative sketch follows vn_space() below.)
         */
        error = -spl_filp_fallocate(vp->v_file,
            FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
            bfp->l_start, bfp->l_len);

        if (fstrans)
                current->flags |= PF_FSTRANS;

#ifdef HAVE_INODE_TRUNCATE_RANGE
        if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
            vp->v_file->f_dentry->d_inode->i_op &&
            vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
                off_t end = bfp->l_start + bfp->l_len;
                /*
                 * Judging from the code in shmem_truncate_range(),
                 * it seems the kernel expects the end offset to be
                 * inclusive and aligned to the end of a page.
                 */
                if (end % PAGE_SIZE != 0) {
                        end &= ~(off_t)(PAGE_SIZE - 1);
                        if (end <= bfp->l_start)

                vp->v_file->f_dentry->d_inode->i_op->truncate_range(
                    vp->v_file->f_dentry->d_inode,

EXPORT_SYMBOL(vn_space);
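/*
 * Illustrative sketch, not part of the SPL itself, of the preferred
 * hole-punching path used in vn_space() above, assuming the kernel defines
 * FALLOC_FL_PUNCH_HOLE.  FALLOC_FL_KEEP_SIZE accompanies the punch so the
 * file length is left unchanged; the negative VFS errno is flipped to the
 * positive Solaris-style value used throughout this file.  The helper name
 * is hypothetical.
 */
#ifdef FALLOC_FL_PUNCH_HOLE
static int
spl_example_punch_hole(struct file *fp, loff_t start, loff_t len)
{
        return (-spl_filp_fallocate(fp,
            FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, start, len));
}
#endif /* FALLOC_FL_PUNCH_HOLE */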
/* Function must be called while holding the vn_file_lock */
file_find(int fd, struct task_struct *task)

        ASSERT(spin_is_locked(&vn_file_lock));

        list_for_each_entry(fp, &vn_file_list, f_list) {
                if (fd == fp->f_fd && fp->f_task == task) {
                        ASSERT(atomic_read(&fp->f_ref) != 0);
        /* Already open, just take an extra reference */
        spin_lock(&vn_file_lock);

        fp = file_find(fd, current);
        /*
         * areleasef() can cause us to see a stale reference when
         * userspace has reused a file descriptor before areleasef()
         * has run.  fput() the stale reference and replace it.  We
         * retain the original reference count such that the concurrent
         * areleasef() will decrement its reference and terminate.
         */
        if (lfp != fp->f_file) {
                fp->f_vnode->v_file = lfp;

        atomic_inc(&fp->f_ref);
        spin_unlock(&vn_file_lock);

        spin_unlock(&vn_file_lock);

        /* File was not yet opened, create the object and setup */
        fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);

        mutex_enter(&fp->f_lock);

        fp->f_task = current;
        atomic_inc(&fp->f_ref);

        vp = vn_alloc(KM_SLEEP);

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&lfp->f_path, &stat);
#else
        rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif /* HAVE_2ARGS_VFS_GETATTR */

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_mode_to_vtype(stat.mode);
        mutex_exit(&vp->v_lock);

        /* Put it on the tracking list */
        spin_lock(&vn_file_lock);
        list_add(&fp->f_list, &vn_file_list);
        spin_unlock(&vn_file_lock);

        mutex_exit(&fp->f_lock);

        mutex_exit(&fp->f_lock);
        kmem_cache_free(vn_file_cache, fp);
static void releasef_locked(file_t *fp)

        /* Unlinked from list, no refs, safe to free outside mutex */
        vn_free(fp->f_vnode);

        kmem_cache_free(vn_file_cache, fp);

        areleasef(fd, P_FINFO(current));

EXPORT_SYMBOL(releasef);
vn_areleasef(int fd, uf_info_t *fip)

        struct task_struct *task = (struct task_struct *)fip;

        spin_lock(&vn_file_lock);
        fp = file_find(fd, task);
        atomic_dec(&fp->f_ref);
        if (atomic_read(&fp->f_ref) > 0) {
                spin_unlock(&vn_file_lock);

        list_del(&fp->f_list);

        spin_unlock(&vn_file_lock);

EXPORT_SYMBOL(areleasef);
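/*
 * Illustrative usage, not part of the SPL itself, of the reference counting
 * protocol implemented above, assuming the getf()/releasef() wrappers
 * declared in <sys/vnode.h>: every successful getf() must be balanced by a
 * releasef() on the same descriptor so the tracked file_t is eventually
 * freed.  The helper name is hypothetical.
 */
static int
spl_example_with_fd(int fd)
{
        file_t *fp;

        fp = getf(fd);                  /* takes a tracked reference */
        if (fp == NULL)
                return (EBADF);

        /* ... use fp->f_vnode and fp->f_file ... */

        releasef(fd);                   /* drops the reference taken above */

        return (0);
}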
#ifdef HAVE_SET_FS_PWD_WITH_CONST
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
#else
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
#endif /* HAVE_SET_FS_PWD_WITH_CONST */

#ifdef HAVE_FS_STRUCT_SPINLOCK
        spin_lock(&fs->lock);

        spin_unlock(&fs->lock);
#else
        write_lock(&fs->lock);

        write_unlock(&fs->lock);
#endif /* HAVE_FS_STRUCT_SPINLOCK */
vn_set_pwd(const char *filename)

        mm_segment_t saved_fs;

        /*
         * user_path_dir() and __user_walk() both expect 'filename' to be
         * a user space address so we must briefly increase the data segment
         * size to ensure strncpy_from_user() does not fail with -EFAULT.
         */
        rc = user_path_dir(filename, &path);

        rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);

        vn_set_fs_pwd(current->fs, &path);

EXPORT_SYMBOL(vn_set_pwd);
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)

        struct vnode *vp = buf;

        mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

} /* vn_cache_constructor() */

vn_cache_destructor(void *buf, void *cdrarg)

        struct vnode *vp = buf;

        mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */

vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)

        atomic_set(&fp->f_ref, 0);
        mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
        INIT_LIST_HEAD(&fp->f_list);

} /* vn_file_cache_constructor() */

vn_file_cache_destructor(void *buf, void *cdrarg)

        mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */
        vn_cache = kmem_cache_create("spl_vn_cache",
            sizeof(struct vnode), 64,
            vn_cache_constructor,
            vn_cache_destructor,
            NULL, NULL, NULL, KMC_KMEM);

        vn_file_cache = kmem_cache_create("spl_vn_file_cache",
            vn_file_cache_constructor,
            vn_file_cache_destructor,
            NULL, NULL, NULL, KMC_KMEM);
        file_t *fp, *next_fp;

        spin_lock(&vn_file_lock);

        list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
                list_del(&fp->f_list);

        spin_unlock(&vn_file_lock);

        printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);

        kmem_cache_destroy(vn_file_cache);
        kmem_cache_destroy(vn_cache);