4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Integros [integros.com]
26 * Copyright 2017 Nexenta Systems, Inc.
29 /* Portions Copyright 2007 Jeremy Teo */
30 /* Portions Copyright 2010 Robert Milkowski */
32 #include <sys/types.h>
33 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sysmacros.h>
37 #include <sys/resource.h>
40 #include <sys/vnode.h>
44 #include <sys/taskq.h>
46 #include <sys/atomic.h>
47 #include <sys/namei.h>
49 #include <sys/cmn_err.h>
50 #include <sys/errno.h>
51 #include <sys/unistd.h>
52 #include <sys/zfs_dir.h>
53 #include <sys/zfs_ioctl.h>
54 #include <sys/fs/zfs.h>
56 #include <sys/dmu_objset.h>
62 #include <sys/dirent.h>
63 #include <sys/policy.h>
64 #include <sys/sunddi.h>
65 #include <sys/filio.h>
67 #include <sys/zfs_ctldir.h>
68 #include <sys/zfs_fuid.h>
69 #include <sys/zfs_sa.h>
70 #include <sys/zfs_rlock.h>
71 #include <sys/extdirent.h>
72 #include <sys/kidmap.h>
75 #include <sys/sched.h>
77 #include <sys/vmmeter.h>
78 #include <vm/vm_param.h>
84 * Each vnode op performs some logical unit of work. To do this, the ZPL must
85 * properly lock its in-core state, create a DMU transaction, do the work,
86 * record this work in the intent log (ZIL), commit the DMU transaction,
87 * and wait for the intent log to commit if it is a synchronous operation.
88 * Moreover, the vnode ops must work in both normal and log replay context.
89 * The ordering of events is important to avoid deadlocks and references
90 * to freed memory. The example below illustrates the following Big Rules:
92 * (1) A check must be made in each zfs thread for a mounted file system.
93 * This is done avoiding races using ZFS_ENTER(zfsvfs).
94 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
95 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
96 * can return EIO from the calling function.
98 * (2) VN_RELE() should always be the last thing except for zil_commit()
99 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
100 * First, if it's the last reference, the vnode/znode
101 * can be freed, so the zp may point to freed memory. Second, the last
102 * reference will call zfs_zinactive(), which may induce a lot of work --
103 * pushing cached pages (which acquires range locks) and syncing out
104 * cached atime changes. Third, zfs_zinactive() may require a new tx,
105 * which could deadlock the system if you were already holding one.
106 * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
108 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
109 * as they can span dmu_tx_assign() calls.
111 * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
112 * dmu_tx_assign(). This is critical because we don't want to block
113 * while holding locks.
115 * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
116 * reduces lock contention and CPU usage when we must wait (note that if
117 * throughput is constrained by the storage, nearly every transaction
118 * must wait).
120 * Note, in particular, that if a lock is sometimes acquired before
121 * the tx assigns, and sometimes after (e.g. z_lock), then failing
122 * to use a non-blocking assign can deadlock the system. The scenario:
124 * Thread A has grabbed a lock before calling dmu_tx_assign().
125 * Thread B is in an already-assigned tx, and blocks for this lock.
126 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
127 * forever, because the previous txg can't quiesce until B's tx commits.
129 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
130 * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
131 * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
132 * to indicate that this operation has already called dmu_tx_wait().
133 * This will ensure that we don't retry forever, waiting a short bit
134 * each time.
136 * (5) If the operation succeeded, generate the intent log entry for it
137 * before dropping locks. This ensures that the ordering of events
138 * in the intent log matches the order in which they actually occurred.
139 * During ZIL replay the zfs_log_* functions will update the sequence
140 * number to indicate the zil transaction has replayed.
142 * (6) At the end of each vnode op, the DMU tx must always commit,
143 * regardless of whether there were any errors.
145 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
146 * to ensure that synchronous semantics are provided when necessary.
148 * In general, this is how things should be ordered in each vnode op:
150 * ZFS_ENTER(zfsvfs); // exit if unmounted
151 * top:
152 * zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
153 * rw_enter(...); // grab any other locks you need
154 * tx = dmu_tx_create(...); // get DMU tx
155 * dmu_tx_hold_*(); // hold each object you might modify
156 * error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
157 * if (error) {
158 * rw_exit(...); // drop locks
159 * zfs_dirent_unlock(dl); // unlock directory entry
160 * VN_RELE(...); // release held vnodes
161 * if (error == ERESTART) {
162 * waited = B_TRUE;
163 * dmu_tx_wait(tx);
164 * dmu_tx_abort(tx);
165 * goto top;
166 * }
167 * dmu_tx_abort(tx); // abort DMU tx
168 * ZFS_EXIT(zfsvfs); // finished in zfs
169 * return (error); // really out of space
170 * }
171 * error = do_real_work(); // do whatever this VOP does
172 * if (error == 0)
173 * zfs_log_*(...); // on success, make ZIL entry
174 * dmu_tx_commit(tx); // commit DMU tx -- error or not
175 * rw_exit(...); // drop locks
176 * zfs_dirent_unlock(dl); // unlock directory entry
177 * VN_RELE(...); // release held vnodes
178 * zil_commit(zilog, foid); // synchronous when necessary
179 * ZFS_EXIT(zfsvfs); // finished in zfs
180 * return (error); // done, report error
185 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
187 znode_t *zp = VTOZ(*vpp);
188 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
193 if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
194 ((flag & FAPPEND) == 0)) {
195 ZFS_EXIT(zfsvfs);
196 return (SET_ERROR(EPERM));
199 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
200 ZTOV(zp)->v_type == VREG &&
201 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
202 if (fs_vscan(*vpp, cr, 0) != 0) {
203 ZFS_EXIT(zfsvfs);
204 return (SET_ERROR(EACCES));
208 /* Keep a count of the synchronous opens in the znode */
209 if (flag & (FSYNC | FDSYNC))
210 atomic_inc_32(&zp->z_sync_cnt);
218 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
219 caller_context_t *ct)
221 znode_t *zp = VTOZ(vp);
222 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
225 * Clean up any locks held by this process on the vp.
227 cleanlocks(vp, ddi_get_pid(), 0);
228 cleanshares(vp, ddi_get_pid());
233 /* Decrement the synchronous opens in the znode */
234 if ((flag & (FSYNC | FDSYNC)) && (count == 1))
235 atomic_dec_32(&zp->z_sync_cnt);
237 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
238 ZTOV(zp)->v_type == VREG &&
239 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
240 VERIFY(fs_vscan(vp, cr, 1) == 0);
247 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
248 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
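*
* A minimal userland sketch (illustrative only, assuming the usual
* SEEK_HOLE/SEEK_DATA lseek(2) interface that is serviced by these
* ioctls) walking the allocated extents of a file:
*
* off_t data, hole = 0;
* while ((data = lseek(fd, hole, SEEK_DATA)) != -1) {
* hole = lseek(fd, data, SEEK_HOLE);
* printf("extent [%jd, %jd)\n", (intmax_t)data, (intmax_t)hole);
* }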
251 zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
253 znode_t *zp = VTOZ(vp);
254 uint64_t noff = (uint64_t)*off; /* new offset */
259 file_sz = zp->z_size;
260 if (noff >= file_sz) {
261 return (SET_ERROR(ENXIO));
264 if (cmd == _FIO_SEEK_HOLE)
265 hole = B_TRUE;
266 else
267 hole = B_FALSE;
269 error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
271 if (error == ESRCH)
272 return (SET_ERROR(ENXIO));
275 * We could find a hole that begins after the logical end-of-file,
276 * because dmu_offset_next() only works on whole blocks. If the
277 * EOF falls mid-block, then indicate that the "virtual hole"
278 * at the end of the file begins at the logical EOF, rather than
279 * at the end of the last block.
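* For example (illustrative numbers): with a 128K block size and a
* logical EOF at 200000, dmu_offset_next() would report the trailing
* hole at 262144 (the end of the last block); we report 200000 instead.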
281 if (noff > file_sz) {
294 zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
295 int *rvalp, caller_context_t *ct)
299 dmu_object_info_t doi;
310 * The following two ioctls are used by bfu. Faking them out is
311 * necessary to avoid bfu errors.
324 if (ddi_copyin((void *)data, &off, sizeof (off), flag))
325 return (SET_ERROR(EFAULT));
327 off = *(offset_t *)data;
330 zfsvfs = zp->z_zfsvfs;
334 /* offset parameter is in/out */
335 error = zfs_holey(vp, com, &off);
340 if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
341 return (SET_ERROR(EFAULT));
343 *(offset_t *)data = off;
348 case _FIO_COUNT_FILLED:
351 * _FIO_COUNT_FILLED adds a new ioctl command which
352 * exposes the number of filled blocks in a
356 zfsvfs = zp->z_zfsvfs;
361 * Wait for all dirty blocks for this object
362 * to get synced out to disk, and the DMU info
365 error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);
372 * Retrieve fill count from DMU object.
374 error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);
380 ndata = doi.doi_fill_count;
383 if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
384 return (SET_ERROR(EFAULT));
389 return (SET_ERROR(ENOTTY));
393 page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
400 * At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
401 * aligned boundaries, if the range is not aligned. As a result a
402 * DEV_BSIZE subrange with partially dirty data may get marked as clean.
403 * It may happen that all DEV_BSIZE subranges are marked clean and thus
404 * the whole page would be considered clean despite having some dirty data.
405 * For this reason we should shrink the range to DEV_BSIZE aligned
406 * boundaries before calling vm_page_clear_dirty.
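* Illustrative numbers: with DEV_BSIZE == 512, off == 100 and
* nbytes == 300, the range [100, 400) shrinks to the empty range
* [512, 0), so nothing is cleared and the partially dirty subranges
* stay dirty.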
408 end = rounddown2(off + nbytes, DEV_BSIZE);
409 off = roundup2(off, DEV_BSIZE);
413 zfs_vmobject_assert_wlocked(obj);
415 vm_page_grab_valid(&pp, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
416 VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
418 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
419 vm_object_pip_add(obj, 1);
420 pmap_remove_write(pp);
422 vm_page_clear_dirty(pp, off, nbytes);
428 page_unbusy(vm_page_t pp)
432 vm_object_pip_wakeup(pp->object);
436 page_wire(vnode_t *vp, int64_t start)
442 zfs_vmobject_assert_wlocked(obj);
444 vm_page_grab_valid(&m, obj, OFF_TO_IDX(start), VM_ALLOC_NOCREAT |
445 VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOBUSY);
450 page_unwire(vm_page_t pp)
453 vm_page_unwire(pp, PQ_ACTIVE);
457 * When a file is memory mapped, we must keep the IO data synchronized
458 * between the DMU cache and the memory mapped pages. What this means:
460 * On Write: If we find a memory mapped page, we write to *both*
461 * the page and the dmu buffer.
464 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
465 int segflg, dmu_tx_t *tx)
472 ASSERT(segflg != UIO_NOCOPY);
473 ASSERT(vp->v_mount != NULL);
477 off = start & PAGEOFFSET;
478 zfs_vmobject_wlock(obj);
479 vm_object_pip_add(obj, 1);
480 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
482 int nbytes = imin(PAGESIZE - off, len);
484 if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
485 zfs_vmobject_wunlock(obj);
487 va = zfs_map_page(pp, &sf);
488 (void) dmu_read(os, oid, start+off, nbytes,
489 va+off, DMU_READ_PREFETCH);
492 zfs_vmobject_wlock(obj);
498 vm_object_pip_wakeup(obj);
499 zfs_vmobject_wunlock(obj);
503 * Read with UIO_NOCOPY flag means that sendfile(2) requests
504 * ZFS to populate a range of page cache pages with data.
506 * NOTE: this function could be optimized to pre-allocate
507 * all pages in advance, drain exclusive busy on all of them,
508 * map them into contiguous KVA region and populate them
509 * in one single dmu_read() call.
512 mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
514 znode_t *zp = VTOZ(vp);
515 objset_t *os = zp->z_zfsvfs->z_os;
525 ASSERT(uio->uio_segflg == UIO_NOCOPY);
526 ASSERT(vp->v_mount != NULL);
529 ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);
531 zfs_vmobject_wlock(obj);
532 for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
533 int bytes = MIN(PAGESIZE, len);
535 pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
536 VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
537 if (pp->valid == 0) {
538 zfs_vmobject_wunlock(obj);
539 va = zfs_map_page(pp, &sf);
540 error = dmu_read(os, zp->z_id, start, bytes, va,
542 if (bytes != PAGESIZE && error == 0)
543 bzero(va + bytes, PAGESIZE - bytes);
545 zfs_vmobject_wlock(obj);
548 if (!vm_page_busied(pp) && !vm_page_wired(pp) &&
552 pp->valid = VM_PAGE_BITS_ALL;
554 vm_page_activate(pp);
558 ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
563 uio->uio_resid -= bytes;
564 uio->uio_offset += bytes;
567 zfs_vmobject_wunlock(obj);
572 * When a file is memory mapped, we must keep the IO data synchronized
573 * between the DMU cache and the memory mapped pages. What this means:
575 * On Read: We "read" preferentially from memory mapped pages,
576 * otherwise we read from the dmu buffer.
578 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
579 * the file is memory mapped.
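* For example (illustrative, 4K pages): a 16K read of an mmap'ed file
* becomes four PAGESIZE uiomoves, each satisfied from the page cache
* when the page is resident and from the DMU otherwise.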
582 mappedread(vnode_t *vp, int nbytes, uio_t *uio)
584 znode_t *zp = VTOZ(vp);
592 ASSERT(vp->v_mount != NULL);
596 start = uio->uio_loffset;
597 off = start & PAGEOFFSET;
598 zfs_vmobject_wlock(obj);
599 for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
601 uint64_t bytes = MIN(PAGESIZE - off, len);
603 if (pp = page_wire(vp, start)) {
607 zfs_vmobject_wunlock(obj);
608 va = zfs_map_page(pp, &sf);
610 error = uiomove(va + off, bytes, UIO_READ, uio);
612 error = vn_io_fault_uiomove(va + off, bytes, uio);
615 zfs_vmobject_wlock(obj);
618 zfs_vmobject_wunlock(obj);
619 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
621 zfs_vmobject_wlock(obj);
628 zfs_vmobject_wunlock(obj);
632 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
635 * Read bytes from specified file into supplied buffer.
637 * IN: vp - vnode of file to be read from.
638 * uio - structure supplying read location, range info,
640 * ioflag - SYNC flags; used to provide FRSYNC semantics.
641 * cr - credentials of caller.
642 * ct - caller context
644 * OUT: uio - updated offset and range, buffer filled.
646 * RETURN: 0 on success, error code on failure.
649 * vp - atime updated if byte count > 0
653 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
655 znode_t *zp = VTOZ(vp);
656 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
665 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
666 ZFS_EXIT(zfsvfs);
667 return (SET_ERROR(EACCES));
671 * Validate file offset
673 if (uio->uio_loffset < (offset_t)0) {
674 ZFS_EXIT(zfsvfs);
675 return (SET_ERROR(EINVAL));
679 * Fasttrack empty reads
681 if (uio->uio_resid == 0) {
687 * Check for mandatory locks
689 if (MANDMODE(zp->z_mode)) {
690 if (error = chklock(vp, FREAD,
691 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
698 * If we're in FRSYNC mode, sync out this znode before reading it.
701 (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
702 zil_commit(zfsvfs->z_log, zp->z_id);
705 * Lock the range against changes.
707 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
710 * If we are reading past end-of-file we can skip
711 * to the end; but we might still need to set atime.
713 if (uio->uio_loffset >= zp->z_size) {
718 ASSERT(uio->uio_loffset < zp->z_size);
719 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
722 if ((uio->uio_extflg == UIO_XUIO) &&
723 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
725 int blksz = zp->z_blksz;
726 uint64_t offset = uio->uio_loffset;
728 xuio = (xuio_t *)uio;
730 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
733 ASSERT(offset + n <= blksz);
736 (void) dmu_xuio_init(xuio, nblk);
738 if (vn_has_cached_data(vp)) {
740 * For simplicity, we always allocate a full buffer
741 * even if we only expect to read a portion of a block.
743 while (--nblk >= 0) {
744 (void) dmu_xuio_add(xuio,
745 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
753 nbytes = MIN(n, zfs_read_chunk_size -
754 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
757 if (uio->uio_segflg == UIO_NOCOPY)
758 error = mappedread_sf(vp, nbytes, uio);
759 else
760 #endif /* __FreeBSD__ */
761 if (vn_has_cached_data(vp)) {
762 error = mappedread(vp, nbytes, uio);
764 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
768 /* convert checksum errors into IO errors */
770 error = SET_ERROR(EIO);
777 zfs_range_unlock(rl);
779 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
785 * Write the bytes to a file.
787 * IN: vp - vnode of file to be written to.
788 * uio - structure supplying write location, range info,
790 * ioflag - FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
791 * set if in append mode.
792 * cr - credentials of caller.
793 * ct - caller context (NFS/CIFS fem monitor only)
795 * OUT: uio - updated offset and range.
797 * RETURN: 0 on success, error code on failure.
800 * vp - ctime|mtime updated if byte count > 0
805 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
807 znode_t *zp = VTOZ(vp);
808 rlim64_t limit = MAXOFFSET_T;
809 ssize_t start_resid = uio->uio_resid;
813 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
818 int max_blksz = zfsvfs->z_max_blksz;
821 iovec_t *aiov = NULL;
824 int iovcnt = uio->uio_iovcnt;
825 iovec_t *iovp = uio->uio_iov;
828 sa_bulk_attr_t bulk[4];
829 uint64_t mtime[2], ctime[2];
832 * Fasttrack empty write
838 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
844 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
845 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
846 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
848 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
852 * In the case where vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. snapshots),
853 * our callers might not be able to detect properly that we are read-only,
854 * so check it explicitly here.
856 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
857 ZFS_EXIT(zfsvfs);
858 return (SET_ERROR(EROFS));
862 * If immutable or not appending then return EPERM.
863 * Intentionally allow ZFS_READONLY through here.
864 * See zfs_zaccess_common()
866 if ((zp->z_pflags & ZFS_IMMUTABLE) ||
867 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
868 (uio->uio_loffset < zp->z_size))) {
869 ZFS_EXIT(zfsvfs);
870 return (SET_ERROR(EPERM));
873 zilog = zfsvfs->z_log;
876 * Validate file offset
878 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
879 if (woff < 0) {
880 ZFS_EXIT(zfsvfs);
881 return (SET_ERROR(EINVAL));
885 * Check for mandatory locks before calling zfs_range_lock()
886 * in order to prevent a deadlock with locks set via fcntl().
888 if (MANDMODE((mode_t)zp->z_mode) &&
889 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
896 * Pre-fault the pages to ensure slow (e.g. NFS) pages
897 * don't hold up txg.
898 * Skip this if uio contains loaned arc_buf.
900 if ((uio->uio_extflg == UIO_XUIO) &&
901 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
902 xuio = (xuio_t *)uio;
904 uio_prefaultpages(MIN(n, max_blksz), uio);
908 * If in append mode, set the io offset pointer to eof.
910 if (ioflag & FAPPEND) {
912 * Obtain an appending range lock to guarantee file append
913 * semantics. We reset the write offset once we have the lock.
915 rl = zfs_range_lock(zp, 0, n, RL_APPEND);
917 if (rl->r_len == UINT64_MAX) {
919 * We overlocked the file because this write will cause
920 * the file block size to increase.
921 * Note that zp_size cannot change with this lock held.
925 uio->uio_loffset = woff;
928 * Note that if the file block size will change as a result of
929 * this write, then this range lock will lock the entire file
930 * so that we can re-write the block safely.
932 rl = zfs_range_lock(zp, woff, n, RL_WRITER);
935 if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
936 zfs_range_unlock(rl);
942 zfs_range_unlock(rl);
944 return (SET_ERROR(EFBIG));
947 if ((woff + n) > limit || woff > (limit - n))
950 /* Will this write extend the file length? */
951 write_eof = (woff + n > zp->z_size);
953 end_size = MAX(zp->z_size, woff + n);
956 * Write the file in reasonable size chunks. Each chunk is written
957 * in a separate transaction; this keeps the intent log records small
958 * and allows us to do more fine-grained space accounting.
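* For example (illustrative numbers): an aligned 1MB write to a file
* with a 128K block size makes eight passes through the loop below,
* each copying one max_blksz chunk under its own tx and ZIL record.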
962 woff = uio->uio_loffset;
963 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
964 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
966 dmu_return_arcbuf(abuf);
967 error = SET_ERROR(EDQUOT);
971 if (xuio && abuf == NULL) {
972 ASSERT(i_iov < iovcnt);
974 abuf = dmu_xuio_arcbuf(xuio, i_iov);
975 dmu_xuio_clear(xuio, i_iov);
976 DTRACE_PROBE3(zfs_cp_write, int, i_iov,
977 iovec_t *, aiov, arc_buf_t *, abuf);
978 ASSERT((aiov->iov_base == abuf->b_data) ||
979 ((char *)aiov->iov_base - (char *)abuf->b_data +
980 aiov->iov_len == arc_buf_size(abuf)));
982 } else if (abuf == NULL && n >= max_blksz &&
983 woff >= zp->z_size &&
984 P2PHASE(woff, max_blksz) == 0 &&
985 zp->z_blksz == max_blksz) {
987 * This write covers a full block. "Borrow" a buffer
988 * from the dmu so that we can fill it before we enter
989 * a transaction. This avoids the possibility of
990 * holding up the transaction if the data copy hangs
991 * up on a pagefault (e.g., from an NFS server mapping).
995 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
997 ASSERT(abuf != NULL);
998 ASSERT(arc_buf_size(abuf) == max_blksz);
999 if (error = uiocopy(abuf->b_data, max_blksz,
1000 UIO_WRITE, uio, &cbytes)) {
1001 dmu_return_arcbuf(abuf);
1004 ASSERT(cbytes == max_blksz);
1008 * Start a transaction.
1010 tx = dmu_tx_create(zfsvfs->z_os);
1011 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1012 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
1013 zfs_sa_upgrade_txholds(tx, zp);
1014 error = dmu_tx_assign(tx, TXG_WAIT);
1018 dmu_return_arcbuf(abuf);
1023 * If zfs_range_lock() over-locked we grow the blocksize
1024 * and then reduce the lock range. This will only happen
1025 * on the first iteration since zfs_range_reduce() will
1026 * shrink down r_len to the appropriate size.
1028 if (rl->r_len == UINT64_MAX) {
1031 if (zp->z_blksz > max_blksz) {
1033 * File's blocksize is already larger than the
1034 * "recordsize" property. Only let it grow to
1035 * the next power of 2.
1037 ASSERT(!ISP2(zp->z_blksz));
1038 new_blksz = MIN(end_size,
1039 1 << highbit64(zp->z_blksz));
1041 new_blksz = MIN(end_size, max_blksz);
1043 zfs_grow_blocksize(zp, new_blksz, tx);
1044 zfs_range_reduce(rl, woff, n);
1048 * XXX - should we really limit each write to z_max_blksz?
1049 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
1051 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
1053 if (woff + nbytes > zp->z_size)
1054 vnode_pager_setsize(vp, woff + nbytes);
1057 tx_bytes = uio->uio_resid;
1058 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
1060 tx_bytes -= uio->uio_resid;
1063 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
1065 * If this is not a full block write, but we are
1066 * extending the file past EOF and this data starts
1067 * block-aligned, use assign_arcbuf(). Otherwise,
1068 * write via dmu_write().
1070 if (tx_bytes < max_blksz && (!write_eof ||
1071 aiov->iov_base != abuf->b_data)) {
1073 dmu_write(zfsvfs->z_os, zp->z_id, woff,
1074 aiov->iov_len, aiov->iov_base, tx);
1075 dmu_return_arcbuf(abuf);
1076 xuio_stat_wbuf_copied();
1078 ASSERT(xuio || tx_bytes == max_blksz);
1079 dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
1082 ASSERT(tx_bytes <= uio->uio_resid);
1083 uioskip(uio, tx_bytes);
1085 if (tx_bytes && vn_has_cached_data(vp)) {
1086 update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
1087 zp->z_id, uio->uio_segflg, tx);
1091 * If we made no progress, we're done. If we made even
1092 * partial progress, update the znode and ZIL accordingly.
1094 if (tx_bytes == 0) {
1095 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
1096 (void *)&zp->z_size, sizeof (uint64_t), tx);
1103 * Clear Set-UID/Set-GID bits on successful write if not
1104 * privileged and at least one of the execute bits is set.
1106 * It would be nice to do this after all writes have
1107 * been done, but that would still expose the ISUID/ISGID
1108 * to another app after the partial write is committed.
1110 * Note: we don't call zfs_fuid_map_id() here because
1111 * user 0 is not an ephemeral uid.
1113 mutex_enter(&zp->z_acl_lock);
1114 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
1115 (S_IXUSR >> 6))) != 0 &&
1116 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
1117 secpolicy_vnode_setid_retain(vp, cr,
1118 (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
1120 zp->z_mode &= ~(S_ISUID | S_ISGID);
1121 newmode = zp->z_mode;
1122 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
1123 (void *)&newmode, sizeof (uint64_t), tx);
1125 mutex_exit(&zp->z_acl_lock);
1127 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
1131 * Update the file size (zp_size) if it has changed;
1132 * account for possible concurrent updates.
1134 while ((end_size = zp->z_size) < uio->uio_loffset) {
1135 (void) atomic_cas_64(&zp->z_size, end_size,
1140 ASSERT(error == 0 || error == EFAULT);
1144 * If we are replaying and eof is non-zero then force
1145 * the file size to the specified eof. Note, there's no
1146 * concurrency during replay.
1148 if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
1149 zp->z_size = zfsvfs->z_replay_eof;
1152 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1154 (void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1156 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
1161 ASSERT(tx_bytes == nbytes);
1166 uio_prefaultpages(MIN(n, max_blksz), uio);
1170 zfs_range_unlock(rl);
1173 * If we're in replay mode, or we made no progress, return error.
1174 * Otherwise, it's at least a partial write, so it's successful.
1176 if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
1183 * EFAULT means that at least one page of the source buffer was not
1184 * available. VFS will re-try remaining I/O upon this error.
1186 if (error == EFAULT) {
1192 if (ioflag & (FSYNC | FDSYNC) ||
1193 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1194 zil_commit(zilog, zp->z_id);
1202 zfs_get_done(zgd_t *zgd, int error)
1204 znode_t *zp = zgd->zgd_private;
1205 objset_t *os = zp->z_zfsvfs->z_os;
1208 dmu_buf_rele(zgd->zgd_db, zgd);
1210 zfs_range_unlock(zgd->zgd_rl);
1213 * Release the vnode asynchronously as we currently have the
1214 * txg stopped from syncing.
1216 VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1218 kmem_free(zgd, sizeof (zgd_t));
1222 static int zil_fault_io = 0;
1226 * Get data to generate a TX_WRITE intent log record.
1229 zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
1231 zfsvfs_t *zfsvfs = arg;
1232 objset_t *os = zfsvfs->z_os;
1234 uint64_t object = lr->lr_foid;
1235 uint64_t offset = lr->lr_offset;
1236 uint64_t size = lr->lr_length;
1241 ASSERT3P(lwb, !=, NULL);
1242 ASSERT3P(zio, !=, NULL);
1243 ASSERT3U(size, !=, 0);
1246 * Nothing to do if the file has been removed
1248 if (zfs_zget(zfsvfs, object, &zp) != 0)
1249 return (SET_ERROR(ENOENT));
1250 if (zp->z_unlinked) {
1252 * Release the vnode asynchronously as we currently have the
1253 * txg stopped from syncing.
1255 VN_RELE_ASYNC(ZTOV(zp),
1256 dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
1257 return (SET_ERROR(ENOENT));
1260 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1262 zgd->zgd_private = zp;
1265 * Write records come in two flavors: immediate and indirect.
1266 * For small writes it's cheaper to store the data with the
1267 * log record (immediate); for large writes it's cheaper to
1268 * sync the data and get a pointer to it (indirect) so that
1269 * we don't have to write the data twice.
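* For example (illustrative sizes): a synchronous write of a few
* hundred bytes arrives here with buf != NULL and is copied straight
* into the log record, while a 128K write arrives with buf == NULL
* and is synced via dmu_sync(), so only a block pointer is logged.
* The immediate/indirect choice itself is made earlier, when the ZIL
* record is created.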
1271 if (buf != NULL) { /* immediate write */
1272 zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
1273 /* test for truncation needs to be done while range locked */
1274 if (offset >= zp->z_size) {
1275 error = SET_ERROR(ENOENT);
1277 error = dmu_read(os, object, offset, size, buf,
1278 DMU_READ_NO_PREFETCH);
1280 ASSERT(error == 0 || error == ENOENT);
1281 } else { /* indirect write */
1283 * Have to lock the whole block to ensure when it's
1284 * written out and its checksum is being calculated
1285 * that no one can change the data. We need to re-check
1286 * blocksize after we get the lock in case it's changed!
1291 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
1293 zgd->zgd_rl = zfs_range_lock(zp, offset, size,
1295 if (zp->z_blksz == size)
1298 zfs_range_unlock(zgd->zgd_rl);
1300 /* test for truncation needs to be done while range locked */
1301 if (lr->lr_offset >= zp->z_size)
1302 error = SET_ERROR(ENOENT);
1305 error = SET_ERROR(EIO);
1310 error = dmu_buf_hold(os, object, offset, zgd, &db,
1311 DMU_READ_NO_PREFETCH);
1314 blkptr_t *bp = &lr->lr_blkptr;
1319 ASSERT(db->db_offset == offset);
1320 ASSERT(db->db_size == size);
1322 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1324 ASSERT(error || lr->lr_length <= size);
1327 * On success, we need to wait for the write I/O
1328 * initiated by dmu_sync() to complete before we can
1329 * release this dbuf. We will finish everything up
1330 * in the zfs_get_done() callback.
1335 if (error == EALREADY) {
1336 lr->lr_common.lrc_txtype = TX_WRITE2;
1338 * TX_WRITE2 relies on the data previously
1339 * written by the TX_WRITE that caused
1340 * EALREADY. We zero out the BP because
1341 * it is the old, currently-on-disk BP.
1350 zfs_get_done(zgd, error);
1357 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
1358 caller_context_t *ct)
1360 znode_t *zp = VTOZ(vp);
1361 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1367 if (flag & V_ACE_MASK)
1368 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1370 error = zfs_zaccess_rwx(zp, mode, flag, cr);
1377 zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
1382 error = vn_lock(*vpp, lkflags);
1389 zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
1391 znode_t *zdp = VTOZ(dvp);
1392 zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1396 ASSERT_VOP_LOCKED(dvp, __func__);
1398 if ((zdp->z_pflags & ZFS_XATTR) == 0)
1399 VERIFY(!RRM_LOCK_HELD(&zfsvfs->z_teardown_lock));
1402 if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
1403 ASSERT3P(dvp, ==, vp);
1405 ltype = lkflags & LK_TYPE_MASK;
1406 if (ltype != VOP_ISLOCKED(dvp)) {
1407 if (ltype == LK_EXCLUSIVE)
1408 vn_lock(dvp, LK_UPGRADE | LK_RETRY);
1409 else /* if (ltype == LK_SHARED) */
1410 vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
1413 * Relock for the "." case could leave us with
1416 if (dvp->v_iflag & VI_DOOMED) {
1418 return (SET_ERROR(ENOENT));
1422 } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
1424 * Note that in this case, dvp is the child vnode, and we
1425 * are looking up the parent vnode - exactly reverse from
1426 * normal operation. Unlocking dvp requires some rather
1427 * tricky unlock/relock dance to prevent mp from being freed;
1428 * use vn_vget_ino_gen() which takes care of all that.
1430 * XXX Note that there is a time window when both vnodes are
1431 * unlocked. It is possible, although highly unlikely, that
1432 * during that window the parent-child relationship between
1433 * the vnodes may change, for example, get reversed.
1434 * In that case we would have a wrong lock order for the vnodes.
1435 * All other filesystems seem to ignore this problem, so we
1437 * A potential solution could be implemented as follows:
1438 * - using LK_NOWAIT when locking the second vnode and retrying
1440 * - checking that the parent-child relationship still holds
1441 * after locking both vnodes and retrying if it doesn't
1443 error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
1446 error = vn_lock(vp, lkflags);
1454 * Lookup an entry in a directory, or an extended attribute directory.
1455 * If it exists, return a held vnode reference for it.
1457 * IN: dvp - vnode of directory to search.
1458 * nm - name of entry to lookup.
1459 * pnp - full pathname to lookup [UNUSED].
1460 * flags - LOOKUP_XATTR set if looking for an attribute.
1461 * rdir - root directory vnode [UNUSED].
1462 * cr - credentials of caller.
1463 * ct - caller context
1465 * OUT: vpp - vnode of located entry, NULL if not found.
1467 * RETURN: 0 on success, error code on failure.
1474 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
1475 int nameiop, cred_t *cr, kthread_t *td, int flags)
1477 znode_t *zdp = VTOZ(dvp);
1479 zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
1483 * Fast path lookup, however we must skip DNLC lookup
1484 * for case folding or normalizing lookups because the
1485 * DNLC code only stores the passed in name. This means
1486 * creating 'a' and removing 'A' on a case insensitive
1487 * file system would work, but DNLC still thinks 'a'
1488 * exists and won't let you create it again on the next
1489 * pass through fast path.
1491 if (!(flags & LOOKUP_XATTR)) {
1492 if (dvp->v_type != VDIR) {
1493 return (SET_ERROR(ENOTDIR));
1494 } else if (zdp->z_sa_hdl == NULL) {
1495 return (SET_ERROR(EIO));
1499 DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
1506 if (flags & LOOKUP_XATTR) {
1509 * If the xattr property is off, refuse the lookup request.
1511 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
1512 ZFS_EXIT(zfsvfs);
1513 return (SET_ERROR(EINVAL));
1518 * We don't allow recursive attributes.
1519 * Maybe someday we will.
1521 if (zdp->z_pflags & ZFS_XATTR) {
1522 ZFS_EXIT(zfsvfs);
1523 return (SET_ERROR(EINVAL));
1526 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
1532 * Do we have permission to get into attribute directory?
1534 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
1535 B_FALSE, cr)) {
1545 * Check accessibility of directory.
1547 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
1552 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
1553 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1554 ZFS_EXIT(zfsvfs);
1555 return (SET_ERROR(EILSEQ));
1560 * First handle the special cases.
1562 if ((cnp->cn_flags & ISDOTDOT) != 0) {
1564 * If we are a snapshot mounted under .zfs, return
1565 * the vp for the snapshot directory.
1567 if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
1568 struct componentname cn;
1573 ltype = VOP_ISLOCKED(dvp);
1575 error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
1578 cn.cn_nameptr = "snapshot";
1579 cn.cn_namelen = strlen(cn.cn_nameptr);
1580 cn.cn_nameiop = cnp->cn_nameiop;
1581 cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
1582 cn.cn_lkflags = cnp->cn_lkflags;
1583 error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
1586 vn_lock(dvp, ltype | LK_RETRY);
1590 if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
1592 if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
1593 return (SET_ERROR(ENOTSUP));
1594 error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
1599 * The loop retries the lookup if the parent-child relationship
1600 * changes during the dot-dot locking complexities.
1605 error = zfs_dirlook(zdp, nm, &zp);
1613 error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
1616 * If we've got a locking error, then the vnode
1617 * got reclaimed because of a force unmount.
1618 * We never enter doomed vnodes into the name cache.
1624 if ((cnp->cn_flags & ISDOTDOT) == 0)
1628 if (zdp->z_sa_hdl == NULL) {
1629 error = SET_ERROR(EIO);
1631 error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
1632 &parent, sizeof (parent));
1639 if (zp->z_id == parent) {
1650 /* Translate errors and add SAVENAME when needed. */
1651 if (cnp->cn_flags & ISLASTCN) {
1655 if (error == ENOENT) {
1656 error = EJUSTRETURN;
1657 cnp->cn_flags |= SAVENAME;
1663 cnp->cn_flags |= SAVENAME;
1668 /* Insert name into cache (as non-existent) if appropriate. */
1669 if (zfsvfs->z_use_namecache &&
1670 error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
1671 cache_enter(dvp, NULL, cnp);
1673 /* Insert name into cache if appropriate. */
1674 if (zfsvfs->z_use_namecache &&
1675 error == 0 && (cnp->cn_flags & MAKEENTRY)) {
1676 if (!(cnp->cn_flags & ISLASTCN) ||
1677 (nameiop != DELETE && nameiop != RENAME)) {
1678 cache_enter(dvp, *vpp, cnp);
1686 * Attempt to create a new entry in a directory. If the entry
1687 * already exists, truncate the file if permissible, else return
1688 * an error. Return the vp of the created or trunc'd file.
1690 * IN: dvp - vnode of directory to put new file entry in.
1691 * name - name of new file entry.
1692 * vap - attributes of new file.
1693 * excl - flag indicating exclusive or non-exclusive mode.
1694 * mode - mode to open file with.
1695 * cr - credentials of caller.
1696 * flag - large file flag [UNUSED].
1697 * ct - caller context
1698 * vsecp - ACL to be set
1700 * OUT: vpp - vnode of created or trunc'd entry.
1702 * RETURN: 0 on success, error code on failure.
1705 * dvp - ctime|mtime updated if new entry created
1706 * vp - ctime|mtime always, atime if new
1711 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
1712 vnode_t **vpp, cred_t *cr, kthread_t *td)
1714 znode_t *zp, *dzp = VTOZ(dvp);
1715 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1722 gid_t gid = crgetgid(cr);
1723 zfs_acl_ids_t acl_ids;
1724 boolean_t fuid_dirtied;
1730 * If we have an ephemeral id, ACL, or XVATTR then
1731 * make sure file system is at proper version
1734 ksid = crgetsid(cr, KSID_OWNER);
1736 uid = ksid_getid(ksid);
1740 if (zfsvfs->z_use_fuids == B_FALSE &&
1741 (vsecp || (vap->va_mask & AT_XVATTR) ||
1742 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1743 return (SET_ERROR(EINVAL));
1748 zilog = zfsvfs->z_log;
1750 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
1751 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1752 ZFS_EXIT(zfsvfs);
1753 return (SET_ERROR(EILSEQ));
1756 if (vap->va_mask & AT_XVATTR) {
1757 if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
1758 crgetuid(cr), cr, vap->va_type)) != 0) {
1766 if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
1767 vap->va_mode &= ~S_ISVTX;
1769 error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
1774 ASSERT3P(zp, ==, NULL);
1777 * Create a new file object and update the directory
1780 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
1785 * We only support the creation of regular files in
1786 * extended attribute directories.
1789 if ((dzp->z_pflags & ZFS_XATTR) &&
1790 (vap->va_type != VREG)) {
1791 error = SET_ERROR(EINVAL);
1795 if ((error = zfs_acl_ids_create(dzp, 0, vap,
1796 cr, vsecp, &acl_ids)) != 0)
1799 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
1800 zfs_acl_ids_free(&acl_ids);
1801 error = SET_ERROR(EDQUOT);
1805 getnewvnode_reserve(1);
1807 tx = dmu_tx_create(os);
1809 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1810 ZFS_SA_BASE_ATTR_SIZE);
1812 fuid_dirtied = zfsvfs->z_fuid_dirty;
1814 zfs_fuid_txhold(zfsvfs, tx);
1815 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1816 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1817 if (!zfsvfs->z_use_sa &&
1818 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1819 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1820 0, acl_ids.z_aclp->z_acl_bytes);
1822 error = dmu_tx_assign(tx, TXG_WAIT);
1824 zfs_acl_ids_free(&acl_ids);
1826 getnewvnode_drop_reserve();
1830 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1833 zfs_fuid_sync(zfsvfs, tx);
1835 (void) zfs_link_create(dzp, name, zp, tx, ZNEW);
1836 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1837 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1838 vsecp, acl_ids.z_fuidp, vap);
1839 zfs_acl_ids_free(&acl_ids);
1842 getnewvnode_drop_reserve();
1849 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1850 zil_commit(zilog, 0);
1857 * Remove an entry from a directory.
1859 * IN: dvp - vnode of directory to remove entry from.
1860 * name - name of entry to remove.
1861 * cr - credentials of caller.
1862 * ct - caller context
1863 * flags - case flags
1865 * RETURN: 0 on success, error code on failure.
1869 * vp - ctime (if nlink > 0)
1874 zfs_remove(vnode_t *dvp, vnode_t *vp, char *name, cred_t *cr)
1876 znode_t *dzp = VTOZ(dvp);
1877 znode_t *zp = VTOZ(vp);
1879 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
1881 uint64_t acl_obj, xattr_obj;
1884 boolean_t unlinked, toobig = FALSE;
1891 zilog = zfsvfs->z_log;
1897 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1902 * Need to use rmdir for removing directories.
1904 if (vp->v_type == VDIR) {
1905 error = SET_ERROR(EPERM);
1909 vnevent_remove(vp, dvp, name, ct);
1913 /* are there any extended attributes? */
1914 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
1915 &xattr_obj, sizeof (xattr_obj));
1916 if (error == 0 && xattr_obj) {
1917 error = zfs_zget(zfsvfs, xattr_obj, &xzp);
1922 * We may delete the znode now, or we may put it in the unlinked set;
1923 * it depends on whether we're the last link, and on whether there are
1924 * other holds on the vnode. So we dmu_tx_hold() the right things to
1925 * allow for either case.
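* For example, removing the last name of a file that another process
* still has open puts the znode in the unlinked set; its space is
* reclaimed only once the last hold on the vnode goes away.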
1927 tx = dmu_tx_create(zfsvfs->z_os);
1928 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1929 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1930 zfs_sa_upgrade_txholds(tx, zp);
1931 zfs_sa_upgrade_txholds(tx, dzp);
1934 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1935 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1938 /* charge as an update -- would be nice not to charge at all */
1939 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1942 * Mark this transaction as typically resulting in a net free of space
1944 dmu_tx_mark_netfree(tx);
1946 error = dmu_tx_assign(tx, TXG_WAIT);
1954 * Remove the directory entry.
1956 error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
1964 zfs_unlinked_add(zp, tx);
1965 vp->v_vflag |= VV_NOSYNC;
1969 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1977 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
1978 zil_commit(zilog, 0);
1985 * Create a new directory and insert it into dvp using the name
1986 * provided. Return a pointer to the inserted directory.
1988 * IN: dvp - vnode of directory to add subdir to.
1989 * dirname - name of new directory.
1990 * vap - attributes of new directory.
1991 * cr - credentials of caller.
1992 * ct - caller context
1993 * flags - case flags
1994 * vsecp - ACL to be set
1996 * OUT: vpp - vnode of created directory.
1998 * RETURN: 0 on success, error code on failure.
2001 * dvp - ctime|mtime updated
2002 * vp - ctime|mtime|atime updated
2006 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
2008 znode_t *zp, *dzp = VTOZ(dvp);
2009 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2016 gid_t gid = crgetgid(cr);
2017 zfs_acl_ids_t acl_ids;
2018 boolean_t fuid_dirtied;
2020 ASSERT(vap->va_type == VDIR);
2023 * If we have an ephemeral id, ACL, or XVATTR then
2024 * make sure file system is at proper version
2027 ksid = crgetsid(cr, KSID_OWNER);
2029 uid = ksid_getid(ksid);
2032 if (zfsvfs->z_use_fuids == B_FALSE &&
2033 ((vap->va_mask & AT_XVATTR) ||
2034 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
2035 return (SET_ERROR(EINVAL));
2039 zilog = zfsvfs->z_log;
2041 if (dzp->z_pflags & ZFS_XATTR) {
2042 ZFS_EXIT(zfsvfs);
2043 return (SET_ERROR(EINVAL));
2046 if (zfsvfs->z_utf8 && u8_validate(dirname,
2047 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
2048 ZFS_EXIT(zfsvfs);
2049 return (SET_ERROR(EILSEQ));
2052 if (vap->va_mask & AT_XVATTR) {
2053 if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
2054 crgetuid(cr), cr, vap->va_type)) != 0) {
2060 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
2061 NULL, &acl_ids)) != 0) {
2067 * First make sure the new directory doesn't exist.
2069 * Existence is checked first to make sure we don't return
2070 * EACCES instead of EEXIST which can cause some applications
2071 * to fail.
2075 if (error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW)) {
2076 zfs_acl_ids_free(&acl_ids);
2080 ASSERT3P(zp, ==, NULL);
2082 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
2083 zfs_acl_ids_free(&acl_ids);
2088 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
2089 zfs_acl_ids_free(&acl_ids);
2090 ZFS_EXIT(zfsvfs);
2091 return (SET_ERROR(EDQUOT));
2095 * Add a new entry to the directory.
2097 getnewvnode_reserve(1);
2098 tx = dmu_tx_create(zfsvfs->z_os);
2099 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
2100 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
2101 fuid_dirtied = zfsvfs->z_fuid_dirty;
2103 zfs_fuid_txhold(zfsvfs, tx);
2104 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2105 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
2106 acl_ids.z_aclp->z_acl_bytes);
2109 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
2110 ZFS_SA_BASE_ATTR_SIZE);
2112 error = dmu_tx_assign(tx, TXG_WAIT);
2114 zfs_acl_ids_free(&acl_ids);
2116 getnewvnode_drop_reserve();
2124 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
2127 zfs_fuid_sync(zfsvfs, tx);
2130 * Now put new name in parent dir.
2132 (void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
2136 txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
2137 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
2138 acl_ids.z_fuidp, vap);
2140 zfs_acl_ids_free(&acl_ids);
2144 getnewvnode_drop_reserve();
2146 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2147 zil_commit(zilog, 0);
2154 * Remove a directory subdir entry. If the current working
2155 * directory is the same as the subdir to be removed, the
2156 * remove will fail.
2158 * IN: dvp - vnode of directory to remove from.
2159 * name - name of directory to be removed.
2160 * cwd - vnode of current working directory.
2161 * cr - credentials of caller.
2162 * ct - caller context
2163 * flags - case flags
2165 * RETURN: 0 on success, error code on failure.
2168 * dvp - ctime|mtime updated
2172 zfs_rmdir(vnode_t *dvp, vnode_t *vp, char *name, cred_t *cr)
2174 znode_t *dzp = VTOZ(dvp);
2175 znode_t *zp = VTOZ(vp);
2176 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
2184 zilog = zfsvfs->z_log;
2187 if (error = zfs_zaccess_delete(dzp, zp, cr)) {
2191 if (vp->v_type != VDIR) {
2192 error = SET_ERROR(ENOTDIR);
2196 vnevent_rmdir(vp, dvp, name, ct);
2198 tx = dmu_tx_create(zfsvfs->z_os);
2199 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
2200 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2201 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2202 zfs_sa_upgrade_txholds(tx, zp);
2203 zfs_sa_upgrade_txholds(tx, dzp);
2204 dmu_tx_mark_netfree(tx);
2205 error = dmu_tx_assign(tx, TXG_WAIT);
2214 error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
2217 uint64_t txtype = TX_RMDIR;
2218 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
2225 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
2226 zil_commit(zilog, 0);
2233 * Read as many directory entries as will fit into the provided
2234 * buffer from the given directory cursor position (specified in
2235 * the uio structure).
2237 * IN: vp - vnode of directory to read.
2238 * uio - structure supplying read location, range info,
2239 * and return buffer.
2240 * cr - credentials of caller.
2241 * ct - caller context
2242 * flags - case flags
2244 * OUT: uio - updated offset and range, buffer filled.
2245 * eofp - set to true if end-of-file detected.
2247 * RETURN: 0 on success, error code on failure.
2250 * vp - atime updated
2252 * Note that the low 4 bits of the cookie returned by zap are always zero.
2253 * This allows us to use the low range for "special" directory entries:
2254 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
2255 * we use the offset 2 for the '.zfs' directory.
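* For example (illustrative values): readdir on the root yields cookies
* 0 ('.'), 1 ('..'), 2 ('.zfs'), and then serialized ZAP cursors such
* as 0x10 or 0x1a0, whose low 4 bits are zero, so the special entries
* can never collide with real ones.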
2259 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_long **cookies)
2261 znode_t *zp = VTOZ(vp);
2265 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2270 zap_attribute_t zap;
2271 uint_t bytes_wanted;
2272 uint64_t offset; /* must be unsigned; checks for < 1 */
2278 boolean_t check_sysattrs;
2281 u_long *cooks = NULL;
2287 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
2288 &parent, sizeof (parent))) != 0) {
2294 * If we are not given an eof variable,
2295 * use a local one.
2301 * Check for valid iov_len.
2303 if (uio->uio_iov->iov_len <= 0) {
2305 return (SET_ERROR(EINVAL));
2309 * Quit if directory has been removed (posix)
2311 if ((*eofp = zp->z_unlinked) != 0) {
2318 offset = uio->uio_loffset;
2319 prefetch = zp->z_zn_prefetch;
2322 * Initialize the iterator cursor.
2326 * Start iteration from the beginning of the directory.
2328 zap_cursor_init(&zc, os, zp->z_id);
2331 * The offset is a serialized cursor.
2333 zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
2337 * Get space to change directory entries into fs independent format.
2339 iovp = uio->uio_iov;
2340 bytes_wanted = iovp->iov_len;
2341 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
2342 bufsize = bytes_wanted;
2343 outbuf = kmem_alloc(bufsize, KM_SLEEP);
2344 odp = (struct dirent64 *)outbuf;
2346 bufsize = bytes_wanted;
2348 odp = (struct dirent64 *)iovp->iov_base;
2350 eodp = (struct edirent *)odp;
2352 if (ncookies != NULL) {
2354 * Minimum entry size is dirent size plus 1 byte for a file name.
2356 ncooks = uio->uio_resid / (sizeof(struct dirent) - sizeof(((struct dirent *)NULL)->d_name) + 1);
2357 cooks = malloc(ncooks * sizeof(u_long), M_TEMP, M_WAITOK);
2362 * If this VFS supports the system attribute view interface; and
2363 * we're looking at an extended attribute directory; and we care
2364 * about normalization conflicts on this vfs; then we must check
2365 * for normalization conflicts with the sysattr name space.
2368 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
2369 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
2370 (flags & V_RDDIR_ENTFLAGS);
2376 * Transform to file-system independent format
2379 while (outcount < bytes_wanted) {
2382 off64_t *next = NULL;
2385 * Special case `.', `..', and `.zfs'.
2388 (void) strcpy(zap.za_name, ".");
2389 zap.za_normalization_conflict = 0;
2392 } else if (offset == 1) {
2393 (void) strcpy(zap.za_name, "..");
2394 zap.za_normalization_conflict = 0;
2397 } else if (offset == 2 && zfs_show_ctldir(zp)) {
2398 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2399 zap.za_normalization_conflict = 0;
2400 objnum = ZFSCTL_INO_ROOT;
2406 if (error = zap_cursor_retrieve(&zc, &zap)) {
2407 if ((*eofp = (error == ENOENT)) != 0)
2413 if (zap.za_integer_length != 8 ||
2414 zap.za_num_integers != 1) {
2415 cmn_err(CE_WARN, "zap_readdir: bad directory "
2416 "entry, obj = %lld, offset = %lld\n",
2417 (u_longlong_t)zp->z_id,
2418 (u_longlong_t)offset);
2419 error = SET_ERROR(ENXIO);
2423 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2425 * MacOS X can extract the object type here such as:
2426 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2428 type = ZFS_DIRENT_TYPE(zap.za_first_integer);
2430 if (check_sysattrs && !zap.za_normalization_conflict) {
2432 zap.za_normalization_conflict =
2433 xattr_sysattr_casechk(zap.za_name);
2435 panic("%s:%u: TODO", __func__, __LINE__);
2440 if (flags & V_RDDIR_ACCFILTER) {
2442 * If we have no access at all, don't include
2443 * this entry in the returned information
2446 if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
2448 if (!zfs_has_access(ezp, cr)) {
2455 if (flags & V_RDDIR_ENTFLAGS)
2456 reclen = EDIRENT_RECLEN(strlen(zap.za_name));
2458 reclen = DIRENT64_RECLEN(strlen(zap.za_name));
2461 * Will this entry fit in the buffer?
2463 if (outcount + reclen > bufsize) {
2465 * Did we manage to fit anything in the buffer?
2468 error = SET_ERROR(EINVAL);
2473 if (flags & V_RDDIR_ENTFLAGS) {
2475 * Add extended flag entry:
2477 eodp->ed_ino = objnum;
2478 eodp->ed_reclen = reclen;
2479 /* NOTE: ed_off is the offset for the *next* entry. */
2480 next = &eodp->ed_off;
2481 eodp->ed_eflags = zap.za_normalization_conflict ?
2482 ED_CASE_CONFLICT : 0;
2483 (void) strncpy(eodp->ed_name, zap.za_name,
2484 EDIRENT_NAMELEN(reclen));
2485 eodp = (edirent_t *)((intptr_t)eodp + reclen);
2490 odp->d_ino = objnum;
2491 odp->d_reclen = reclen;
2492 odp->d_namlen = strlen(zap.za_name);
2493 /* NOTE: d_off is the offset for the *next* entry. */
2494 next = &odp->d_off;
2495 (void) strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
2497 dirent_terminate(odp);
2498 odp = (dirent64_t *)((intptr_t)odp + reclen);
2502 ASSERT(outcount <= bufsize);
2504 /* Prefetch znode */
2506 dmu_prefetch(os, objnum, 0, 0, 0,
2507 ZIO_PRIORITY_SYNC_READ);
2511 * Move to the next entry, fill in the previous offset.
2513 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
2514 zap_cursor_advance(&zc);
2515 offset = zap_cursor_serialize(&zc);
2520 /* Fill the offset right after advancing the cursor. */
2521 if (next != NULL)
2522 *next = offset;
2523 if (cooks != NULL) {
2524 *cooks++ = offset;
2525 ncooks--;
2526 KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
2529 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2531 /* Subtract unused cookies */
2532 if (ncookies != NULL)
2533 *ncookies -= ncooks;
2535 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
2536 iovp->iov_base += outcount;
2537 iovp->iov_len -= outcount;
2538 uio->uio_resid -= outcount;
2539 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
2541 * Reset the pointer.
2543 offset = uio->uio_loffset;
2547 zap_cursor_fini(&zc);
2548 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
2549 kmem_free(outbuf, bufsize);
2551 if (error == ENOENT)
2554 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2556 uio->uio_loffset = offset;
2558 if (error != 0 && cookies != NULL) {
2559 free(*cookies, M_TEMP);
2566 ulong_t zfs_fsync_sync_cnt = 4;
2569 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2571 znode_t *zp = VTOZ(vp);
2572 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2574 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2576 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
2579 zil_commit(zfsvfs->z_log, zp->z_id);
2587 * Get the requested file attributes and place them in the provided
2590 * IN: vp - vnode of file.
2591 * vap - va_mask identifies requested attributes.
2592 * If AT_XVATTR set, then optional attrs are requested
2593 * flags - ATTR_NOACLCHECK (CIFS server context)
2594 * cr - credentials of caller.
2595 * ct - caller context
2597 * OUT: vap - attribute values.
2599 * RETURN: 0 (always succeeds).
2603 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2604 caller_context_t *ct)
2606 znode_t *zp = VTOZ(vp);
2607 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2610 u_longlong_t nblocks;
2611 uint64_t mtime[2], ctime[2], crtime[2], rdev;
2612 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2613 xoptattr_t *xoap = NULL;
2614 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2615 sa_bulk_attr_t bulk[4];
2621 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2623 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2624 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2625 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
2626 if (vp->v_type == VBLK || vp->v_type == VCHR)
2627 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
2630 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2636 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2637 * Also, if we are the owner don't bother, since owner should
2638 * always be allowed to read basic attributes of file.
2640 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2641 (vap->va_uid != crgetuid(cr))) {
2642 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2643 skipaclchk, cr)) {
2650 * Return all attributes. It's cheaper to provide the answer
2651 * than to determine whether we were asked the question.
2654 vap->va_type = IFTOVT(zp->z_mode);
2655 vap->va_mode = zp->z_mode & ~S_IFMT;
2657 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2661 vap->va_nodeid = zp->z_id;
2662 vap->va_nlink = zp->z_links;
2663 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
2664 zp->z_links < ZFS_LINK_MAX)
2665 vap->va_nlink++;
2666 vap->va_size = zp->z_size;
2668 vap->va_rdev = vp->v_rdev;
2670 if (vp->v_type == VBLK || vp->v_type == VCHR)
2671 vap->va_rdev = zfs_cmpldev(rdev);
2673 vap->va_seq = zp->z_seq;
2674 vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
2675 vap->va_filerev = zp->z_seq;
2678 * Add in any requested optional attributes and the create time.
2679 * Also set the corresponding bits in the returned attribute bitmap.
2681 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2682 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2684 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2685 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2688 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2689 xoap->xoa_readonly =
2690 ((zp->z_pflags & ZFS_READONLY) != 0);
2691 XVA_SET_RTN(xvap, XAT_READONLY);
2694 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2696 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2697 XVA_SET_RTN(xvap, XAT_SYSTEM);
2700 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2702 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2703 XVA_SET_RTN(xvap, XAT_HIDDEN);
2706 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2707 xoap->xoa_nounlink =
2708 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2709 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2712 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2713 xoap->xoa_immutable =
2714 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2715 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2718 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2719 xoap->xoa_appendonly =
2720 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2721 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2724 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2726 ((zp->z_pflags & ZFS_NODUMP) != 0);
2727 XVA_SET_RTN(xvap, XAT_NODUMP);
2730 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2732 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2733 XVA_SET_RTN(xvap, XAT_OPAQUE);
2736 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2737 xoap->xoa_av_quarantined =
2738 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2739 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2742 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2743 xoap->xoa_av_modified =
2744 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2745 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2748 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2749 vp->v_type == VREG) {
2750 zfs_sa_get_scanstamp(zp, xvap);
2753 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2754 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2755 XVA_SET_RTN(xvap, XAT_REPARSE);
2757 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2758 xoap->xoa_generation = zp->z_gen;
2759 XVA_SET_RTN(xvap, XAT_GEN);
2762 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2764 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2765 XVA_SET_RTN(xvap, XAT_OFFLINE);
2768 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2770 ((zp->z_pflags & ZFS_SPARSE) != 0);
2771 XVA_SET_RTN(xvap, XAT_SPARSE);
2775 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2776 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2777 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2778 ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
2781 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2782 vap->va_blksize = blksize;
2783 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
2785 if (zp->z_blksz == 0) {
2787 * Block size hasn't been set; suggest maximal I/O transfers.
2789 vap->va_blksize = zfsvfs->z_max_blksz;
 * Set the file attributes to the values contained in the
 * vattr structure.
2800 * IN: vp - vnode of file to be modified.
2801 * vap - new attribute values.
2802 * If AT_XVATTR set, then optional attrs are being set
2803 * flags - ATTR_UTIME set if non-default time values provided.
2804 * - ATTR_NOACLCHECK (CIFS context only).
2805 * cr - credentials of caller.
2806 * ct - caller context
2808 * RETURN: 0 on success, error code on failure.
2811 * vp - ctime updated, mtime updated if size changed.
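 *
 * A minimal hypothetical caller that truncates a file to zero length,
 * exercising the AT_SIZE path below:
 *
 *	vattr_t va;
 *
 *	va.va_mask = AT_SIZE;
 *	va.va_size = 0;
 *	error = zfs_setattr(vp, &va, 0, cr, NULL);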
2815 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2816 caller_context_t *ct)
2818 znode_t *zp = VTOZ(vp);
2819 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2824 uint_t mask = vap->va_mask;
2825 uint_t saved_mask = 0;
2826 uint64_t saved_mode;
2829 uint64_t new_uid, new_gid;
2831 uint64_t mtime[2], ctime[2];
2833 int need_policy = FALSE;
2835 zfs_fuid_info_t *fuidp = NULL;
2836 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2839 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2840 boolean_t fuid_dirtied = B_FALSE;
2841 sa_bulk_attr_t bulk[7], xattr_bulk[7];
2842 int count = 0, xattr_count = 0;
2847 if (mask & AT_NOSET)
2848 return (SET_ERROR(EINVAL));
2853 zilog = zfsvfs->z_log;
 * Make sure that if an ephemeral uid/gid or an xvattr is specified,
 * the file system is at the proper version level.
2860 if (zfsvfs->z_use_fuids == B_FALSE &&
2861 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2862 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2863 (mask & AT_XVATTR))) {
2865 return (SET_ERROR(EINVAL));
2868 if (mask & AT_SIZE && vp->v_type == VDIR) {
2870 return (SET_ERROR(EISDIR));
2873 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2875 return (SET_ERROR(EINVAL));
2879 * If this is an xvattr_t, then get a pointer to the structure of
2880 * optional attributes. If this is NULL, then we have a vattr_t.
2882 xoap = xva_getxoptattr(xvap);
2884 xva_init(&tmpxvattr);
 * Immutable files may only have their immutable bit and atime altered.
2889 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2890 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2891 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2893 return (SET_ERROR(EPERM));
2897 * Note: ZFS_READONLY is handled in zfs_zaccess_common.
 * Verify that the timestamps don't overflow 32 bits.
 * ZFS can handle large timestamps, but 32-bit syscalls can't
 * handle times past 2038. This check should be removed
 * once large timestamps are fully supported.
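 * (A 32-bit signed time_t holds at most 2^31 - 1 seconds past the
 * epoch, i.e. times up to 03:14:07 UTC on January 19, 2038.)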
2906 if (mask & (AT_ATIME | AT_MTIME)) {
2907 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2908 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2910 return (SET_ERROR(EOVERFLOW));
2913 if (xoap && (mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
2914 TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
2916 return (SET_ERROR(EOVERFLOW));
2922 /* Can this be moved to before the top label? */
2923 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2925 return (SET_ERROR(EROFS));
2929 * First validate permissions
2932 if (mask & AT_SIZE) {
2934 * XXX - Note, we are not providing any open
2935 * mode flags here (like FNDELAY), so we may
2936 * block if there are locks present... this
2937 * should be addressed in openat().
2939 /* XXX - would it be OK to generate a log record here? */
2940 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2947 if (mask & (AT_ATIME|AT_MTIME) ||
2948 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2949 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2950 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2951 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2952 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2953 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2954 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2955 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2959 if (mask & (AT_UID|AT_GID)) {
2960 int idmask = (mask & (AT_UID|AT_GID));
2965 * NOTE: even if a new mode is being set,
2966 * we may clear S_ISUID/S_ISGID bits.
2969 if (!(mask & AT_MODE))
2970 vap->va_mode = zp->z_mode;
 * Take ownership, or chgrp to a group we are a member of.
2976 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2977 take_group = (mask & AT_GID) &&
2978 zfs_groupmember(zfsvfs, vap->va_gid, cr);
 * If both AT_UID and AT_GID are set then take_owner and
 * take_group must both be set in order to allow taking over
 * ownership of the file.
2985 * Otherwise, send the check through secpolicy_vnode_setattr()
2989 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2990 ((idmask == AT_UID) && take_owner) ||
2991 ((idmask == AT_GID) && take_group)) {
2992 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2993 skipaclchk, cr) == 0) {
2995 * Remove setuid/setgid for non-privileged users
2997 secpolicy_setid_clear(vap, vp, cr);
2998 trim_mask = (mask & (AT_UID|AT_GID));
3007 oldva.va_mode = zp->z_mode;
3008 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3009 if (mask & AT_XVATTR) {
3011 * Update xvattr mask to include only those attributes
3012 * that are actually changing.
 * The bits will be restored prior to actually setting
 * the attributes so that the caller thinks they were set.
3017 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3018 if (xoap->xoa_appendonly !=
3019 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3022 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3023 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
3027 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3028 if (xoap->xoa_nounlink !=
3029 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3032 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3033 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
3037 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3038 if (xoap->xoa_immutable !=
3039 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3042 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3043 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
3047 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3048 if (xoap->xoa_nodump !=
3049 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3052 XVA_CLR_REQ(xvap, XAT_NODUMP);
3053 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
3057 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3058 if (xoap->xoa_av_modified !=
3059 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3062 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3063 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
3067 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3068 if ((vp->v_type != VREG &&
3069 xoap->xoa_av_quarantined) ||
3070 xoap->xoa_av_quarantined !=
3071 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3074 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3075 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
3079 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3081 return (SET_ERROR(EPERM));
3084 if (need_policy == FALSE &&
3085 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3086 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3091 if (mask & AT_MODE) {
3092 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3093 err = secpolicy_setid_setsticky_clear(vp, vap,
3099 trim_mask |= AT_MODE;
 * If trim_mask is set then take ownership
 * has been granted, or write_acl is present and the user
 * has the ability to modify the mode. In that case remove
 * UID|GID and/or MODE from the mask so that
 * secpolicy_vnode_setattr() doesn't revoke it.
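 *
 * For example, for a hypothetical chown by the file's owner: the
 * caller passes AT_UID, take_owner is granted above, so AT_UID lands
 * in trim_mask, is masked out of va_mask around the call to
 * secpolicy_vnode_setattr() below, and is then restored into va_mask.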
3115 saved_mask = vap->va_mask;
3116 vap->va_mask &= ~trim_mask;
3117 if (trim_mask & AT_MODE) {
3119 * Save the mode, as secpolicy_vnode_setattr()
 * will overwrite it with oldva.va_mode.
3122 saved_mode = vap->va_mode;
3125 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3126 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3133 vap->va_mask |= saved_mask;
3134 if (trim_mask & AT_MODE) {
3136 * Recover the mode after
3137 * secpolicy_vnode_setattr().
3139 vap->va_mode = saved_mode;
 * secpolicy_vnode_setattr(), or take ownership, may have
 * changed va_mask.
3148 mask = vap->va_mask;
3150 if ((mask & (AT_UID | AT_GID))) {
3151 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3152 &xattr_obj, sizeof (xattr_obj));
3154 if (err == 0 && xattr_obj) {
3155 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
3157 err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
3159 vrele(ZTOV(attrzp));
3164 if (mask & AT_UID) {
3165 new_uid = zfs_fuid_create(zfsvfs,
3166 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3167 if (new_uid != zp->z_uid &&
3168 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3171 err = SET_ERROR(EDQUOT);
3176 if (mask & AT_GID) {
3177 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3178 cr, ZFS_GROUP, &fuidp);
3179 if (new_gid != zp->z_gid &&
3180 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3183 err = SET_ERROR(EDQUOT);
3188 tx = dmu_tx_create(zfsvfs->z_os);
3190 if (mask & AT_MODE) {
3191 uint64_t pmode = zp->z_mode;
3193 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3195 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3196 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3197 err = SET_ERROR(EPERM);
3201 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3204 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
 * Are we upgrading the ACL from the old V0 format
 * to the newer format?
3209 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3210 zfs_znode_acl_version(zp) ==
3211 ZFS_ACL_VERSION_INITIAL) {
3212 dmu_tx_hold_free(tx, acl_obj, 0,
3214 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3215 0, aclp->z_acl_bytes);
3217 dmu_tx_hold_write(tx, acl_obj, 0,
3220 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3221 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3222 0, aclp->z_acl_bytes);
3224 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3226 if ((mask & AT_XVATTR) &&
3227 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3228 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3230 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3234 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3237 fuid_dirtied = zfsvfs->z_fuid_dirty;
3239 zfs_fuid_txhold(zfsvfs, tx);
3241 zfs_sa_upgrade_txholds(tx, zp);
3243 err = dmu_tx_assign(tx, TXG_WAIT);
3249 * Set each attribute requested.
3250 * We group settings according to the locks they need to acquire.
3252 * Note: you cannot set ctime directly, although it will be
3253 * updated as a side-effect of calling this function.
3256 if (mask & (AT_UID|AT_GID|AT_MODE))
3257 mutex_enter(&zp->z_acl_lock);
3259 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3260 &zp->z_pflags, sizeof (zp->z_pflags));
3263 if (mask & (AT_UID|AT_GID|AT_MODE))
3264 mutex_enter(&attrzp->z_acl_lock);
3265 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3266 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3267 sizeof (attrzp->z_pflags));
3270 if (mask & (AT_UID|AT_GID)) {
3272 if (mask & AT_UID) {
3273 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3274 &new_uid, sizeof (new_uid));
3275 zp->z_uid = new_uid;
3277 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3278 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3280 attrzp->z_uid = new_uid;
3284 if (mask & AT_GID) {
3285 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3286 NULL, &new_gid, sizeof (new_gid));
3287 zp->z_gid = new_gid;
3289 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3290 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3292 attrzp->z_gid = new_gid;
3295 if (!(mask & AT_MODE)) {
3296 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3297 NULL, &new_mode, sizeof (new_mode));
3298 new_mode = zp->z_mode;
3300 err = zfs_acl_chown_setattr(zp);
3303 err = zfs_acl_chown_setattr(attrzp);
3308 if (mask & AT_MODE) {
3309 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3310 &new_mode, sizeof (new_mode));
3311 zp->z_mode = new_mode;
3312 ASSERT3U((uintptr_t)aclp, !=, 0);
3313 err = zfs_aclset_common(zp, aclp, cr, tx);
3315 if (zp->z_acl_cached)
3316 zfs_acl_free(zp->z_acl_cached);
3317 zp->z_acl_cached = aclp;
3322 if (mask & AT_ATIME) {
3323 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3324 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3325 &zp->z_atime, sizeof (zp->z_atime));
3328 if (mask & AT_MTIME) {
3329 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3330 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3331 mtime, sizeof (mtime));
3334 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3335 if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3336 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3337 NULL, mtime, sizeof (mtime));
3338 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3339 &ctime, sizeof (ctime));
3340 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3342 } else if (mask != 0) {
3343 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3344 &ctime, sizeof (ctime));
3345 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3348 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3349 SA_ZPL_CTIME(zfsvfs), NULL,
3350 &ctime, sizeof (ctime));
3351 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3352 mtime, ctime, B_TRUE);
 * Do this after setting the timestamps to prevent a timestamp
 * update from toggling the bit.
3360 if (xoap && (mask & AT_XVATTR)) {
3362 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
3363 xoap->xoa_createtime = vap->va_birthtime;
 * Restore the trimmed-off masks
 * so that the return masks can be set for the caller.
3369 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3370 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3372 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3373 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3375 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3376 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3378 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3379 XVA_SET_REQ(xvap, XAT_NODUMP);
3381 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3382 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3384 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3385 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3388 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3389 ASSERT(vp->v_type == VREG);
3391 zfs_xvattr_set(zp, xvap, tx);
3395 zfs_fuid_sync(zfsvfs, tx);
3398 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3400 if (mask & (AT_UID|AT_GID|AT_MODE))
3401 mutex_exit(&zp->z_acl_lock);
3404 if (mask & (AT_UID|AT_GID|AT_MODE))
3405 mutex_exit(&attrzp->z_acl_lock);
3408 if (err == 0 && attrzp) {
3409 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3421 zfs_fuid_info_free(fuidp);
3428 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3433 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3434 zil_commit(zilog, 0);
3441 * We acquire all but fdvp locks using non-blocking acquisitions. If we
3442 * fail to acquire any lock in the path we will drop all held locks,
3443 * acquire the new lock in a blocking fashion, and then release it and
3444 * restart the rename. This acquire/release step ensures that we do not
 * spin on a lock waiting for release. On error, release all vnode locks
 * and decrement references the way tmpfs_rename() would do.
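 *
 * The backoff for each non-blocking acquisition below has this shape
 * (an outline of the pattern, not additional code):
 *
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
 *	if (error == EBUSY) {
 *		<drop all locks held so far>
 *		error = vn_lock(vp, LK_EXCLUSIVE);	<blocking>
 *		VOP_UNLOCK(vp, 0);
 *		<restart the relock from the top>
 *	}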
3449 zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
3450 struct vnode *tdvp, struct vnode **tvpp,
3451 const struct componentname *scnp, const struct componentname *tcnp)
3454 struct vnode *nvp, *svp, *tvp;
3455 znode_t *sdzp, *tdzp, *szp, *tzp;
3456 const char *snm = scnp->cn_nameptr;
3457 const char *tnm = tcnp->cn_nameptr;
3460 VOP_UNLOCK(tdvp, 0);
3461 if (*tvpp != NULL && *tvpp != tdvp)
3462 VOP_UNLOCK(*tvpp, 0);
3465 error = vn_lock(sdvp, LK_EXCLUSIVE);
3470 error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
3472 VOP_UNLOCK(sdvp, 0);
3475 error = vn_lock(tdvp, LK_EXCLUSIVE);
3478 VOP_UNLOCK(tdvp, 0);
3484 * Before using sdzp and tdzp we must ensure that they are live.
3485 * As a porting legacy from illumos we have two things to worry
 * about. One is typical for FreeBSD: the vnode must not have been
 * reclaimed (doomed). The other is that the znode must be live.
3488 * The current code can invalidate the znode without acquiring the
3489 * corresponding vnode lock if the object represented by the znode
3490 * and vnode is no longer valid after a rollback or receive operation.
3491 * z_teardown_lock hidden behind ZFS_ENTER and ZFS_EXIT is the lock
 * that protects the znodes from invalidation.
3494 zfsvfs = sdzp->z_zfsvfs;
3495 ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
 * We cannot use ZFS_VERIFY_ZP() here because it could return directly,
 * bypassing the cleanup code, in the case of an error.
3502 if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
3504 VOP_UNLOCK(sdvp, 0);
3505 VOP_UNLOCK(tdvp, 0);
3506 error = SET_ERROR(EIO);
 * Re-resolve svp to be certain it still exists and fetch the
 * znode for it.
3514 error = zfs_dirent_lookup(sdzp, snm, &szp, ZEXISTS);
3516 /* Source entry invalid or not there. */
3518 VOP_UNLOCK(sdvp, 0);
3519 VOP_UNLOCK(tdvp, 0);
3520 if ((scnp->cn_flags & ISDOTDOT) != 0 ||
3521 (scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
3522 error = SET_ERROR(EINVAL);
 * Re-resolve tvp; if it disappeared, we just carry on.
3530 error = zfs_dirent_lookup(tdzp, tnm, &tzp, 0);
3533 VOP_UNLOCK(sdvp, 0);
3534 VOP_UNLOCK(tdvp, 0);
3536 if ((tcnp->cn_flags & ISDOTDOT) != 0)
3537 error = SET_ERROR(EINVAL);
3546 * At present the vnode locks must be acquired before z_teardown_lock,
3547 * although it would be more logical to use the opposite order.
 * Now try to acquire locks on svp and tvp.
3555 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
3557 VOP_UNLOCK(sdvp, 0);
3558 VOP_UNLOCK(tdvp, 0);
3561 if (error != EBUSY) {
3565 error = vn_lock(nvp, LK_EXCLUSIVE);
3572 * Concurrent rename race.
3577 error = SET_ERROR(EINVAL);
3592 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
3594 VOP_UNLOCK(sdvp, 0);
3595 VOP_UNLOCK(tdvp, 0);
3596 VOP_UNLOCK(*svpp, 0);
3597 if (error != EBUSY) {
3601 error = vn_lock(nvp, LK_EXCLUSIVE);
3619 * Note that we must use VRELE_ASYNC in this function as it walks
3620 * up the directory tree and vrele may need to acquire an exclusive
 * lock if the last reference to a vnode is dropped.
3624 zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
3631 zfsvfs = tdzp->z_zfsvfs;
3633 return (SET_ERROR(EINVAL));
3636 if (tdzp->z_id == zfsvfs->z_root)
3640 ASSERT(!zp->z_unlinked);
3641 if ((error = sa_lookup(zp->z_sa_hdl,
3642 SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
3645 if (parent == szp->z_id) {
3646 error = SET_ERROR(EINVAL);
3649 if (parent == zfsvfs->z_root)
3651 if (parent == sdzp->z_id)
3654 error = zfs_zget(zfsvfs, parent, &zp1);
3659 VN_RELE_ASYNC(ZTOV(zp),
3660 dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
3664 if (error == ENOTDIR)
3665 panic("checkpath: .. not a directory\n");
3667 VN_RELE_ASYNC(ZTOV(zp),
3668 dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
3673 * Move an entry from the provided source directory to the target
3674 * directory. Change the entry name as indicated.
3676 * IN: sdvp - Source directory containing the "old entry".
3677 * snm - Old entry name.
3678 * tdvp - Target directory to contain the "new entry".
3679 * tnm - New entry name.
3680 * cr - credentials of caller.
3681 * ct - caller context
3682 * flags - case flags
3684 * RETURN: 0 on success, error code on failure.
3687 * sdvp,tdvp - ctime|mtime updated
3691 zfs_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
3692 vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
3696 znode_t *sdzp, *tdzp, *szp, *tzp;
3697 zilog_t *zilog = NULL;
3699 char *snm = scnp->cn_nameptr;
3700 char *tnm = tcnp->cn_nameptr;
3703 /* Reject renames across filesystems. */
3704 if ((*svpp)->v_mount != tdvp->v_mount ||
3705 ((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
3706 error = SET_ERROR(EXDEV);
3710 if (zfsctl_is_node(tdvp)) {
3711 error = SET_ERROR(EXDEV);
3716 * Lock all four vnodes to ensure safety and semantics of renaming.
3718 error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
3720 /* no vnodes are locked in the case of error here */
3726 zfsvfs = tdzp->z_zfsvfs;
3727 zilog = zfsvfs->z_log;
 * After we re-enter ZFS_ENTER() we will have to revalidate all
 * the znodes involved.
3735 if (zfsvfs->z_utf8 && u8_validate(tnm,
3736 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3737 error = SET_ERROR(EILSEQ);
3741 /* If source and target are the same file, there is nothing to do. */
3742 if ((*svpp) == (*tvpp)) {
3747 if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
3748 ((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
3749 (*tvpp)->v_mountedhere != NULL)) {
3750 error = SET_ERROR(EXDEV);
 * We cannot use ZFS_VERIFY_ZP() here because it could return directly,
 * bypassing the cleanup code, in the case of an error.
3758 if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
3759 error = SET_ERROR(EIO);
3764 tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
3765 if (szp->z_sa_hdl == NULL || (tzp != NULL && tzp->z_sa_hdl == NULL)) {
3766 error = SET_ERROR(EIO);
 * This is to prevent the creation of links into attribute space
 * by renaming a linked file into/out of an attribute directory.
3773 * See the comment in zfs_link() for why this is considered bad.
3775 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3776 error = SET_ERROR(EINVAL);
3781 * Must have write access at the source to remove the old entry
3782 * and write access at the target to create the new entry.
3783 * Note that if target and source are the same, this can be
3784 * done in a single check.
3786 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3789 if ((*svpp)->v_type == VDIR) {
3791 * Avoid ".", "..", and aliases of "." for obvious reasons.
3793 if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
3795 (scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
3801 * Check to make sure rename is valid.
3802 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3804 if (error = zfs_rename_check(szp, sdzp, tdzp))
3809 * Does target exist?
3813 * Source and target must be the same type.
3815 if ((*svpp)->v_type == VDIR) {
3816 if ((*tvpp)->v_type != VDIR) {
3817 error = SET_ERROR(ENOTDIR);
3825 if ((*tvpp)->v_type == VDIR) {
3826 error = SET_ERROR(EISDIR);
3832 vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
3834 vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
 * Notify the target directory if it is not the same
 * as the source directory.
3841 vnevent_rename_dest_dir(tdvp, ct);
3844 tx = dmu_tx_create(zfsvfs->z_os);
3845 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3846 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3847 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3848 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3850 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3851 zfs_sa_upgrade_txholds(tx, tdzp);
3854 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3855 zfs_sa_upgrade_txholds(tx, tzp);
3858 zfs_sa_upgrade_txholds(tx, szp);
3859 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3860 error = dmu_tx_assign(tx, TXG_WAIT);
3867 if (tzp) /* Attempt to remove the existing target */
3868 error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
3871 error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
3873 szp->z_pflags |= ZFS_AV_MODIFIED;
3875 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3876 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3879 error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
3882 zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
3883 snm, tdzp, tnm, szp);
3886 * Update path information for the target vnode
3888 vn_renamepath(tdvp, *svpp, tnm, strlen(tnm));
3891 * At this point, we have successfully created
3892 * the target name, but have failed to remove
3893 * the source name. Since the create was done
3894 * with the ZRENAMING flag, there are
3895 * complications; for one, the link count is
3896 * wrong. The easiest way to deal with this
3897 * is to remove the newly created target, and
3898 * return the original error. This must
3899 * succeed; fortunately, it is very unlikely to
3900 * fail, since we just created it.
3902 VERIFY3U(zfs_link_destroy(tdzp, tnm, szp, tx,
3903 ZRENAMING, NULL), ==, 0);
3910 cache_purge_negative(tdvp);
3916 unlockout: /* all 4 vnodes are locked, ZFS_ENTER called */
3918 VOP_UNLOCK(*svpp, 0);
3919 VOP_UNLOCK(sdvp, 0);
3921 out: /* original two vnodes are locked */
3922 if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3923 zil_commit(zilog, 0);
3926 VOP_UNLOCK(*tvpp, 0);
3928 VOP_UNLOCK(tdvp, 0);
3933 * Insert the indicated symbolic reference entry into the directory.
3935 * IN: dvp - Directory to contain new symbolic link.
 * name - Name for the new symlink entry.
 * link - Target path the new symlink will point to.
3937 * vap - Attributes of new entry.
3938 * cr - credentials of caller.
3939 * ct - caller context
3940 * flags - case flags
3942 * RETURN: 0 on success, error code on failure.
3945 * dvp - ctime|mtime updated
3949 zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
3950 cred_t *cr, kthread_t *td)
3952 znode_t *zp, *dzp = VTOZ(dvp);
3954 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
3956 uint64_t len = strlen(link);
3958 zfs_acl_ids_t acl_ids;
3959 boolean_t fuid_dirtied;
3960 uint64_t txtype = TX_SYMLINK;
3963 ASSERT(vap->va_type == VLNK);
3967 zilog = zfsvfs->z_log;
3969 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3970 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3972 return (SET_ERROR(EILSEQ));
3975 if (len > MAXPATHLEN) {
3977 return (SET_ERROR(ENAMETOOLONG));
3980 if ((error = zfs_acl_ids_create(dzp, 0,
3981 vap, cr, NULL, &acl_ids)) != 0) {
3987 * Attempt to lock directory; fail if entry already exists.
3989 error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
3991 zfs_acl_ids_free(&acl_ids);
3996 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
3997 zfs_acl_ids_free(&acl_ids);
4002 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
4003 zfs_acl_ids_free(&acl_ids);
4005 return (SET_ERROR(EDQUOT));
4008 getnewvnode_reserve(1);
4009 tx = dmu_tx_create(zfsvfs->z_os);
4010 fuid_dirtied = zfsvfs->z_fuid_dirty;
4011 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4012 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4013 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4014 ZFS_SA_BASE_ATTR_SIZE + len);
4015 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4016 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4017 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4018 acl_ids.z_aclp->z_acl_bytes);
4021 zfs_fuid_txhold(zfsvfs, tx);
4022 error = dmu_tx_assign(tx, TXG_WAIT);
4024 zfs_acl_ids_free(&acl_ids);
4026 getnewvnode_drop_reserve();
4032 * Create a new object for the symlink.
 * For version 4 ZPL datasets the symlink will be an SA attribute.
4035 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4038 zfs_fuid_sync(zfsvfs, tx);
4041 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4044 zfs_sa_symlink(zp, link, len, tx);
4047 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4048 &zp->z_size, sizeof (zp->z_size), tx);
4050 * Insert the new object into the directory.
4052 (void) zfs_link_create(dzp, name, zp, tx, ZNEW);
4054 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4057 zfs_acl_ids_free(&acl_ids);
4061 getnewvnode_drop_reserve();
4063 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4064 zil_commit(zilog, 0);
4071 * Return, in the buffer contained in the provided uio structure,
4072 * the symbolic path referred to by vp.
4074 * IN: vp - vnode of symbolic link.
4075 * uio - structure to contain the link path.
4076 * cr - credentials of caller.
4077 * ct - caller context
4079 * OUT: uio - structure containing the link path.
4081 * RETURN: 0 on success, error code on failure.
4084 * vp - atime updated
4088 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
4090 znode_t *zp = VTOZ(vp);
4091 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4098 error = sa_lookup_uio(zp->z_sa_hdl,
4099 SA_ZPL_SYMLINK(zfsvfs), uio);
4101 error = zfs_sa_readlink(zp, uio);
4103 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4110 * Insert a new entry into directory tdvp referencing svp.
4112 * IN: tdvp - Directory to contain new entry.
4113 * svp - vnode of new entry.
4114 * name - name of new entry.
4115 * cr - credentials of caller.
4116 * ct - caller context
4118 * RETURN: 0 on success, error code on failure.
4121 * tdvp - ctime|mtime updated
4122 * svp - ctime updated
4126 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4127 caller_context_t *ct, int flags)
4129 znode_t *dzp = VTOZ(tdvp);
4131 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4138 ASSERT(tdvp->v_type == VDIR);
4142 zilog = zfsvfs->z_log;
4145 * POSIX dictates that we return EPERM here.
4146 * Better choices include ENOTSUP or EISDIR.
4148 if (svp->v_type == VDIR) {
4150 return (SET_ERROR(EPERM));
4156 if (szp->z_pflags & (ZFS_APPENDONLY | ZFS_IMMUTABLE | ZFS_READONLY)) {
4158 return (SET_ERROR(EPERM));
4161 /* Prevent links to .zfs/shares files */
4163 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4164 &parent, sizeof (uint64_t))) != 0) {
4168 if (parent == zfsvfs->z_shares_dir) {
4170 return (SET_ERROR(EPERM));
4173 if (zfsvfs->z_utf8 && u8_validate(name,
4174 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4176 return (SET_ERROR(EILSEQ));
4180 * We do not support links between attributes and non-attributes
4181 * because of the potential security risk of creating links
4182 * into "normal" file space in order to circumvent restrictions
4183 * imposed in attribute space.
4185 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4187 return (SET_ERROR(EINVAL));
4191 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4192 if (owner != crgetuid(cr) && secpolicy_basic_link(svp, cr) != 0) {
4194 return (SET_ERROR(EPERM));
4197 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4203 * Attempt to lock directory; fail if entry already exists.
4205 error = zfs_dirent_lookup(dzp, name, &tzp, ZNEW);
4211 tx = dmu_tx_create(zfsvfs->z_os);
4212 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4213 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4214 zfs_sa_upgrade_txholds(tx, szp);
4215 zfs_sa_upgrade_txholds(tx, dzp);
4216 error = dmu_tx_assign(tx, TXG_WAIT);
4223 error = zfs_link_create(dzp, name, szp, tx, 0);
4226 uint64_t txtype = TX_LINK;
4227 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4233 vnevent_link(svp, ct);
4236 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4237 zil_commit(zilog, 0);
4246 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4248 znode_t *zp = VTOZ(vp);
4249 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4252 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4253 if (zp->z_sa_hdl == NULL) {
4255 * The fs has been unmounted, or we did a
4256 * suspend/resume and this file no longer exists.
4258 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4263 if (zp->z_unlinked) {
4265 * Fast path to recycle a vnode of a removed file.
4267 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4272 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4273 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4275 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4276 zfs_sa_upgrade_txholds(tx, zp);
4277 error = dmu_tx_assign(tx, TXG_WAIT);
4281 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4282 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4283 zp->z_atime_dirty = 0;
4287 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4291 CTASSERT(sizeof(struct zfid_short) <= sizeof(struct fid));
4292 CTASSERT(sizeof(struct zfid_long) <= sizeof(struct fid));
4296 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4298 znode_t *zp = VTOZ(vp);
4299 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4302 uint64_t object = zp->z_id;
4309 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4310 &gen64, sizeof (uint64_t))) != 0) {
4315 gen = (uint32_t)gen64;
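	/*
	 * A snapshot mounted under .zfs is not its own parent filesystem,
	 * so its file IDs use the long form, which additionally encodes
	 * the objset id (filled in below when size == LONG_FID_LEN).
	 */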
4317 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4320 if (fidp->fid_len < size) {
4321 fidp->fid_len = size;
4323 return (SET_ERROR(ENOSPC));
4326 fidp->fid_len = size;
4329 zfid = (zfid_short_t *)fidp;
4331 zfid->zf_len = size;
4333 for (i = 0; i < sizeof (zfid->zf_object); i++)
4334 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4336 /* Must have a non-zero generation number to distinguish from .zfs */
4339 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4340 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4342 if (size == LONG_FID_LEN) {
4343 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
4346 zlfid = (zfid_long_t *)fidp;
4348 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4349 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4351 /* XXX - this should be the generation number for the objset */
4352 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4353 zlfid->zf_setgen[i] = 0;
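	/*
	 * The stores above serialize each field one byte at a time,
	 * little-endian. A consumer reassembles them the same way, e.g.
	 * (a sketch, not code from this file):
	 *
	 *	uint64_t object = 0;
	 *	int i;
	 *
	 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
	 *		object |= (uint64_t)zfid->zf_object[i] << (8 * i);
	 */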
4361 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4362 caller_context_t *ct)
4370 *valp = MIN(LONG_MAX, ZFS_LINK_MAX);
4373 case _PC_FILESIZEBITS:
4377 case _PC_XATTR_EXISTS:
4379 zfsvfs = zp->z_zfsvfs;
4383 error = zfs_dirent_lookup(zp, "", &xzp,
4384 ZXATTR | ZEXISTS | ZSHARED);
4386 if (!zfs_dirempty(xzp))
4389 } else if (error == ENOENT) {
4391 * If there aren't extended attributes, it's the
4392 * same as having zero of them.
4399 case _PC_SATTR_ENABLED:
4400 case _PC_SATTR_EXISTS:
4401 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4402 (vp->v_type == VREG || vp->v_type == VDIR);
4405 case _PC_ACCESS_FILTERING:
4406 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4410 case _PC_ACL_ENABLED:
4411 *valp = _ACL_ACE_ENABLED;
4413 #endif /* illumos */
4414 case _PC_MIN_HOLE_SIZE:
4415 *valp = (int)SPA_MINBLOCKSIZE;
4418 case _PC_TIMESTAMP_RESOLUTION:
4419 /* nanosecond timestamp resolution */
4423 case _PC_ACL_EXTENDED:
4431 case _PC_ACL_PATH_MAX:
4432 *valp = ACL_MAX_ENTRIES;
4436 return (EOPNOTSUPP);
4442 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4443 caller_context_t *ct)
4445 znode_t *zp = VTOZ(vp);
4446 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4448 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4452 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4460 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4461 caller_context_t *ct)
4463 znode_t *zp = VTOZ(vp);
4464 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4466 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4467 zilog_t *zilog = zfsvfs->z_log;
4472 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4474 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4475 zil_commit(zilog, 0);
4482 zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
4485 znode_t *zp = VTOZ(vp);
4486 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4487 objset_t *os = zp->z_zfsvfs->z_os;
4490 off_t start, end, obj_size;
4492 int pgsin_b, pgsin_a;
4498 start = IDX_TO_OFF(ma[0]->pindex);
4499 end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
4502 * Lock a range covering all required and optional pages.
4503 * Note that we need to handle the case of the block size growing.
4506 blksz = zp->z_blksz;
4507 rl = zfs_range_lock(zp, rounddown(start, blksz),
4508 roundup(end, blksz) - rounddown(start, blksz), RL_READER);
4509 if (blksz == zp->z_blksz)
4511 zfs_range_unlock(rl);
4514 object = ma[0]->object;
4515 zfs_vmobject_wlock(object);
4516 obj_size = object->un_pager.vnp.vnp_size;
4517 zfs_vmobject_wunlock(object);
4518 if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
4519 zfs_range_unlock(rl);
4521 return (zfs_vm_pagerret_bad);
4525 if (rbehind != NULL) {
4526 pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
4527 pgsin_b = MIN(*rbehind, pgsin_b);
4531 if (rahead != NULL) {
4532 pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
4533 if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
4534 pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
4535 pgsin_a = MIN(*rahead, pgsin_a);
4539 * NB: we need to pass the exact byte size of the data that we expect
4540 * to read after accounting for the file size. This is required because
 * ZFS will panic if we request DMU to read beyond the end of the last
 * allocated block.
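 *
 * The last argument below is thus the number of valid bytes in the
 * final requested page. A worked example (assuming PAGE_SIZE is 4096):
 * with obj_size 10000 and end 12288, the last page starts at offset
 * 8192, so MIN(end, obj_size) - (end - PAGE_SIZE) = 10000 - 8192 =
 * 1808 bytes.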
4544 error = dmu_read_pages(os, zp->z_id, ma, count, &pgsin_b, &pgsin_a,
4545 MIN(end, obj_size) - (end - PAGE_SIZE));
4547 zfs_range_unlock(rl);
4548 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4552 return (zfs_vm_pagerret_error);
4554 VM_CNT_INC(v_vnodein);
4555 VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
4556 if (rbehind != NULL)
4560 return (zfs_vm_pagerret_ok);
4564 zfs_freebsd_getpages(ap)
4565 struct vop_getpages_args /* {
4574 return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
4579 zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
4582 znode_t *zp = VTOZ(vp);
4583 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4592 vm_ooffset_t lo_off;
4603 object = vp->v_object;
4607 KASSERT(ma[0]->object == object, ("mismatching object"));
4608 KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
4610 for (i = 0; i < pcount; i++)
4611 rtvals[i] = zfs_vm_pagerret_error;
4613 off = IDX_TO_OFF(ma[0]->pindex);
4614 blksz = zp->z_blksz;
4615 lo_off = rounddown(off, blksz);
4616 lo_len = roundup(len + (off - lo_off), blksz);
4617 rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
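	/*
	 * A worked example of the rounding above, with hypothetical
	 * numbers: for off 8192, len 8192 and blksz 131072 we get
	 * lo_off = rounddown(8192, 131072) = 0 and
	 * lo_len = roundup(8192 + 8192, 131072) = 131072, i.e. the whole
	 * block covering the dirtied pages is write-locked.
	 */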
4619 zfs_vmobject_wlock(object);
4620 if (len + off > object->un_pager.vnp.vnp_size) {
4621 if (object->un_pager.vnp.vnp_size > off) {
4624 len = object->un_pager.vnp.vnp_size - off;
4626 if ((pgoff = (int)len & PAGE_MASK) != 0) {
4628 * If the object is locked and the following
4629 * conditions hold, then the page's dirty
 * field cannot be concurrently changed by a pmap operation.
4634 vm_page_assert_sbusied(m);
4635 KASSERT(!pmap_page_is_write_mapped(m),
4636 ("zfs_putpages: page %p is not read-only", m));
4637 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
4644 if (ncount < pcount) {
4645 for (i = ncount; i < pcount; i++) {
4646 rtvals[i] = zfs_vm_pagerret_bad;
4650 zfs_vmobject_wunlock(object);
4655 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4656 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4660 tx = dmu_tx_create(zfsvfs->z_os);
4661 dmu_tx_hold_write(tx, zp->z_id, off, len);
4663 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4664 zfs_sa_upgrade_txholds(tx, zp);
4665 err = dmu_tx_assign(tx, TXG_WAIT);
4671 if (zp->z_blksz < PAGE_SIZE) {
4672 for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
4673 tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
4674 va = zfs_map_page(ma[i], &sf);
4675 dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
4679 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
4683 uint64_t mtime[2], ctime[2];
4684 sa_bulk_attr_t bulk[3];
4687 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4689 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4691 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4693 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4695 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
4697 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4699 zfs_vmobject_wlock(object);
4700 for (i = 0; i < ncount; i++) {
4701 rtvals[i] = zfs_vm_pagerret_ok;
4702 vm_page_undirty(ma[i]);
4704 zfs_vmobject_wunlock(object);
4705 VM_CNT_INC(v_vnodeout);
4706 VM_CNT_ADD(v_vnodepgsout, ncount);
4711 zfs_range_unlock(rl);
4712 if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
4713 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4714 zil_commit(zfsvfs->z_log, zp->z_id);
4720 zfs_freebsd_putpages(ap)
4721 struct vop_putpages_args /* {
4730 return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
4735 zfs_freebsd_bmap(ap)
4736 struct vop_bmap_args /* {
4739 struct bufobj **a_bop;
4746 if (ap->a_bop != NULL)
4747 *ap->a_bop = &ap->a_vp->v_bufobj;
4748 if (ap->a_bnp != NULL)
4749 *ap->a_bnp = ap->a_bn;
4750 if (ap->a_runp != NULL)
4752 if (ap->a_runb != NULL)
4759 zfs_freebsd_open(ap)
4760 struct vop_open_args /* {
4763 struct ucred *a_cred;
4764 struct thread *a_td;
4767 vnode_t *vp = ap->a_vp;
4768 znode_t *zp = VTOZ(vp);
4771 error = zfs_open(&vp, ap->a_mode, ap->a_cred, NULL);
4773 vnode_create_vobject(vp, zp->z_size, ap->a_td);
4778 zfs_freebsd_close(ap)
4779 struct vop_close_args /* {
4782 struct ucred *a_cred;
4783 struct thread *a_td;
4787 return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred, NULL));
4791 zfs_freebsd_ioctl(ap)
4792 struct vop_ioctl_args /* {
4802 return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
4803 ap->a_fflag, ap->a_cred, NULL, NULL));
4807 ioflags(int ioflags)
4811 if (ioflags & IO_APPEND)
4813 if (ioflags & IO_NDELAY)
4815 if (ioflags & IO_SYNC)
4816 flags |= (FSYNC | FDSYNC | FRSYNC);
4822 zfs_freebsd_read(ap)
4823 struct vop_read_args /* {
4827 struct ucred *a_cred;
4831 return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
4836 zfs_freebsd_write(ap)
4837 struct vop_write_args /* {
4841 struct ucred *a_cred;
4845 return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
4850 zfs_freebsd_access(ap)
4851 struct vop_access_args /* {
4853 accmode_t a_accmode;
4854 struct ucred *a_cred;
4855 struct thread *a_td;
4858 vnode_t *vp = ap->a_vp;
4859 znode_t *zp = VTOZ(vp);
 * ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND,
4866 accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
4868 error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);
4871 * VADMIN has to be handled by vaccess().
4874 accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
4876 error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
4877 zp->z_gid, accmode, ap->a_cred, NULL);
 * For VEXEC, ensure that at least one execute bit is set for
 * non-directories.
4885 if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
4886 (zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
4894 zfs_freebsd_lookup(ap)
4895 struct vop_lookup_args /* {
4896 struct vnode *a_dvp;
4897 struct vnode **a_vpp;
4898 struct componentname *a_cnp;
4901 struct componentname *cnp = ap->a_cnp;
4902 char nm[NAME_MAX + 1];
4904 ASSERT(cnp->cn_namelen < sizeof(nm));
4905 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof(nm)));
4907 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
4908 cnp->cn_cred, cnp->cn_thread, 0));
4912 zfs_cache_lookup(ap)
4913 struct vop_lookup_args /* {
4914 struct vnode *a_dvp;
4915 struct vnode **a_vpp;
4916 struct componentname *a_cnp;
4921 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4922 if (zfsvfs->z_use_namecache)
4923 return (vfs_cache_lookup(ap));
4925 return (zfs_freebsd_lookup(ap));
4929 zfs_freebsd_create(ap)
4930 struct vop_create_args /* {
4931 struct vnode *a_dvp;
4932 struct vnode **a_vpp;
4933 struct componentname *a_cnp;
4934 struct vattr *a_vap;
4938 struct componentname *cnp = ap->a_cnp;
4939 vattr_t *vap = ap->a_vap;
4942 ASSERT(cnp->cn_flags & SAVENAME);
4944 vattr_init_mask(vap);
4945 mode = vap->va_mode & ALLPERMS;
4946 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4948 error = zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
4949 ap->a_vpp, cnp->cn_cred, cnp->cn_thread);
4950 if (zfsvfs->z_use_namecache &&
4951 error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
4952 cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
4957 zfs_freebsd_remove(ap)
4958 struct vop_remove_args /* {
4959 struct vnode *a_dvp;
4961 struct componentname *a_cnp;
4965 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
4967 return (zfs_remove(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
4968 ap->a_cnp->cn_cred));
4972 zfs_freebsd_mkdir(ap)
4973 struct vop_mkdir_args /* {
4974 struct vnode *a_dvp;
4975 struct vnode **a_vpp;
4976 struct componentname *a_cnp;
4977 struct vattr *a_vap;
4980 vattr_t *vap = ap->a_vap;
4982 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
4984 vattr_init_mask(vap);
4986 return (zfs_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, vap, ap->a_vpp,
4987 ap->a_cnp->cn_cred));
4991 zfs_freebsd_rmdir(ap)
4992 struct vop_rmdir_args /* {
4993 struct vnode *a_dvp;
4995 struct componentname *a_cnp;
4998 struct componentname *cnp = ap->a_cnp;
5000 ASSERT(cnp->cn_flags & SAVENAME);
5002 return (zfs_rmdir(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
5006 zfs_freebsd_readdir(ap)
5007 struct vop_readdir_args /* {
5010 struct ucred *a_cred;
5017 return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag,
5018 ap->a_ncookies, ap->a_cookies));
5022 zfs_freebsd_fsync(ap)
5023 struct vop_fsync_args /* {
5026 struct thread *a_td;
5031 return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
5035 zfs_freebsd_getattr(ap)
5036 struct vop_getattr_args /* {
5038 struct vattr *a_vap;
5039 struct ucred *a_cred;
5042 vattr_t *vap = ap->a_vap;
5048 xvap.xva_vattr = *vap;
5049 xvap.xva_vattr.va_mask |= AT_XVATTR;
5051 /* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
5053 XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
5054 XVA_SET_REQ(&xvap, XAT_APPENDONLY);
5055 XVA_SET_REQ(&xvap, XAT_NOUNLINK);
5056 XVA_SET_REQ(&xvap, XAT_NODUMP);
5057 XVA_SET_REQ(&xvap, XAT_READONLY);
5058 XVA_SET_REQ(&xvap, XAT_ARCHIVE);
5059 XVA_SET_REQ(&xvap, XAT_SYSTEM);
5060 XVA_SET_REQ(&xvap, XAT_HIDDEN);
5061 XVA_SET_REQ(&xvap, XAT_REPARSE);
5062 XVA_SET_REQ(&xvap, XAT_OFFLINE);
5063 XVA_SET_REQ(&xvap, XAT_SPARSE);
5065 error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred, NULL);
5069 /* Convert ZFS xattr into chflags. */
5070 #define FLAG_CHECK(fflag, xflag, xfield) do { \
5071 if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
5072 fflags |= (fflag); \
5074 FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
5075 xvap.xva_xoptattrs.xoa_immutable);
5076 FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
5077 xvap.xva_xoptattrs.xoa_appendonly);
5078 FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
5079 xvap.xva_xoptattrs.xoa_nounlink);
5080 FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
5081 xvap.xva_xoptattrs.xoa_archive);
5082 FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
5083 xvap.xva_xoptattrs.xoa_nodump);
5084 FLAG_CHECK(UF_READONLY, XAT_READONLY,
5085 xvap.xva_xoptattrs.xoa_readonly);
5086 FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
5087 xvap.xva_xoptattrs.xoa_system);
5088 FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
5089 xvap.xva_xoptattrs.xoa_hidden);
5090 FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
5091 xvap.xva_xoptattrs.xoa_reparse);
5092 FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
5093 xvap.xva_xoptattrs.xoa_offline);
5094 FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
5095 xvap.xva_xoptattrs.xoa_sparse);
5098 *vap = xvap.xva_vattr;
5099 vap->va_flags = fflags;
5104 zfs_freebsd_setattr(ap)
5105 struct vop_setattr_args /* {
5107 struct vattr *a_vap;
5108 struct ucred *a_cred;
5111 vnode_t *vp = ap->a_vp;
5112 vattr_t *vap = ap->a_vap;
5113 cred_t *cred = ap->a_cred;
5118 vattr_init_mask(vap);
5119 vap->va_mask &= ~AT_NOSET;
5122 xvap.xva_vattr = *vap;
5124 zflags = VTOZ(vp)->z_pflags;
5126 if (vap->va_flags != VNOVAL) {
5127 zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
5130 if (zfsvfs->z_use_fuids == B_FALSE)
5131 return (EOPNOTSUPP);
5133 fflags = vap->va_flags;
5136 * We need to figure out whether it makes sense to allow
5137 * UF_REPARSE through, since we don't really have other
5138 * facilities to handle reparse points and zfs_setattr()
5139 * doesn't currently allow setting that attribute anyway.
5141 if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
5142 UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
5143 UF_OFFLINE|UF_SPARSE)) != 0)
5144 return (EOPNOTSUPP);
5146 * Unprivileged processes are not permitted to unset system
5147 * flags, or modify flags if any system flags are set.
5148 * Privileged non-jail processes may not modify system flags
5149 * if securelevel > 0 and any existing system flags are set.
5150 * Privileged jail processes behave like privileged non-jail
5151 * processes if the PR_ALLOW_CHFLAGS permission bit is set;
5152 * otherwise, they behave like unprivileged processes.
5154 if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
5155 priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
5157 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
5158 error = securelevel_gt(cred, 0);
5164 * Callers may only modify the file flags on objects they
5165 * have VADMIN rights for.
5167 if ((error = VOP_ACCESS(vp, VADMIN, cred, curthread)) != 0)
5170 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
5174 (SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
5179 #define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
5180 if (((fflags & (fflag)) && !(zflags & (zflag))) || \
5181 ((zflags & (zflag)) && !(fflags & (fflag)))) { \
5182 XVA_SET_REQ(&xvap, (xflag)); \
5183 (xfield) = ((fflags & (fflag)) != 0); \
5186 /* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
5188 FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
5189 xvap.xva_xoptattrs.xoa_immutable);
5190 FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
5191 xvap.xva_xoptattrs.xoa_appendonly);
5192 FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
5193 xvap.xva_xoptattrs.xoa_nounlink);
5194 FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
5195 xvap.xva_xoptattrs.xoa_archive);
5196 FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
5197 xvap.xva_xoptattrs.xoa_nodump);
5198 FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
5199 xvap.xva_xoptattrs.xoa_readonly);
5200 FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
5201 xvap.xva_xoptattrs.xoa_system);
5202 FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
5203 xvap.xva_xoptattrs.xoa_hidden);
5204 FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
5205 xvap.xva_xoptattrs.xoa_reparse);
5206 FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
5207 xvap.xva_xoptattrs.xoa_offline);
5208 FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
5209 xvap.xva_xoptattrs.xoa_sparse);
5212 if (vap->va_birthtime.tv_sec != VNOVAL) {
5213 xvap.xva_vattr.va_mask |= AT_XVATTR;
5214 XVA_SET_REQ(&xvap, XAT_CREATETIME);
5216 return (zfs_setattr(vp, (vattr_t *)&xvap, 0, cred, NULL));
5220 zfs_freebsd_rename(ap)
5221 struct vop_rename_args /* {
5222 struct vnode *a_fdvp;
5223 struct vnode *a_fvp;
5224 struct componentname *a_fcnp;
5225 struct vnode *a_tdvp;
5226 struct vnode *a_tvp;
5227 struct componentname *a_tcnp;
5230 vnode_t *fdvp = ap->a_fdvp;
5231 vnode_t *fvp = ap->a_fvp;
5232 vnode_t *tdvp = ap->a_tdvp;
5233 vnode_t *tvp = ap->a_tvp;
5236 ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
5237 ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
5239 error = zfs_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
5240 ap->a_tcnp, ap->a_fcnp->cn_cred);
5252 zfs_freebsd_symlink(ap)
5253 struct vop_symlink_args /* {
5254 struct vnode *a_dvp;
5255 struct vnode **a_vpp;
5256 struct componentname *a_cnp;
5257 struct vattr *a_vap;
5261 struct componentname *cnp = ap->a_cnp;
5262 vattr_t *vap = ap->a_vap;
5264 ASSERT(cnp->cn_flags & SAVENAME);
5266 vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
5267 vattr_init_mask(vap);
5269 return (zfs_symlink(ap->a_dvp, ap->a_vpp, cnp->cn_nameptr, vap,
5270 __DECONST(char *, ap->a_target), cnp->cn_cred, cnp->cn_thread));
5274 zfs_freebsd_readlink(ap)
5275 struct vop_readlink_args /* {
5278 struct ucred *a_cred;
5282 return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL));
5286 zfs_freebsd_link(ap)
5287 struct vop_link_args /* {
5288 struct vnode *a_tdvp;
5290 struct componentname *a_cnp;
5293 struct componentname *cnp = ap->a_cnp;
5294 vnode_t *vp = ap->a_vp;
5295 vnode_t *tdvp = ap->a_tdvp;
5297 if (tdvp->v_mount != vp->v_mount)
5300 ASSERT(cnp->cn_flags & SAVENAME);
5302 return (zfs_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_cred, NULL, 0));
5306 zfs_freebsd_inactive(ap)
5307 struct vop_inactive_args /* {
5309 struct thread *a_td;
5312 vnode_t *vp = ap->a_vp;
5314 zfs_inactive(vp, ap->a_td->td_ucred, NULL);
5319 zfs_freebsd_reclaim(ap)
5320 struct vop_reclaim_args /* {
5322 struct thread *a_td;
5325 vnode_t *vp = ap->a_vp;
5326 znode_t *zp = VTOZ(vp);
5327 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5332 * z_teardown_inactive_lock protects from a race with
 * zfs_znode_dmu_fini in zfsvfs_teardown during a forced unmount.
5336 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
5337 if (zp->z_sa_hdl == NULL)
5341 rw_exit(&zfsvfs->z_teardown_inactive_lock);
5349 struct vop_fid_args /* {
5355 return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
5359 zfs_freebsd_pathconf(ap)
5360 struct vop_pathconf_args /* {
5363 register_t *a_retval;
5369 error = zfs_pathconf(ap->a_vp, ap->a_name, &val, curthread->td_ucred, NULL);
5371 *ap->a_retval = val;
5374 if (error != EOPNOTSUPP)
5377 switch (ap->a_name) {
5379 *ap->a_retval = NAME_MAX;
5382 if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
5383 *ap->a_retval = PIPE_BUF;
5388 return (vop_stdpathconf(ap));
 * FreeBSD's extended attribute namespaces define the file name prefixes
 * for ZFS extended attribute names:
5397 * system freebsd:system:
5398 * user (none, can be used to access ZFS fsattr(5) attributes
5399 * created on Solaris)
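 *
 * For example, a system-namespace attribute named "md5" (an
 * illustrative name) is stored as "freebsd:system:md5", while a
 * user-namespace name is passed through unchanged for Solaris
 * compatibility.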
5402 zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
5405 const char *namespace, *prefix, *suffix;
/* We don't allow the '/' character in attribute names. */
5408 if (strchr(name, '/') != NULL)
/* We don't allow attribute names that start with the "freebsd:" prefix. */
5411 if (strncmp(name, "freebsd:", 8) == 0)
5414 bzero(attrname, size);
5416 switch (attrnamespace) {
5417 case EXTATTR_NAMESPACE_USER:
5419 prefix = "freebsd:";
5420 namespace = EXTATTR_NAMESPACE_USER_STRING;
5424 * This is the default namespace by which we can access all
5425 * attributes created on Solaris.
5427 prefix = namespace = suffix = "";
5430 case EXTATTR_NAMESPACE_SYSTEM:
5431 prefix = "freebsd:";
5432 namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
5435 case EXTATTR_NAMESPACE_EMPTY:
5439 if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
5441 return (ENAMETOOLONG);
 * Vnode operation to retrieve a named extended attribute.
5450 zfs_getextattr(struct vop_getextattr_args *ap)
5453 IN struct vnode *a_vp;
5454 IN int a_attrnamespace;
5455 IN const char *a_name;
5456 INOUT struct uio *a_uio;
5458 IN struct ucred *a_cred;
5459 IN struct thread *a_td;
5463 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
5464 struct thread *td = ap->a_td;
5465 struct nameidata nd;
5468 vnode_t *xvp = NULL, *vp;
5471 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5472 ap->a_cred, ap->a_td, VREAD);
5476 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
5483 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
5491 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
5493 error = vn_open_cred(&nd, &flags, 0, 0, ap->a_cred, NULL);
5495 NDFREE(&nd, NDF_ONLY_PNBUF);
5498 if (error == ENOENT)
5503 if (ap->a_size != NULL) {
5504 error = VOP_GETATTR(vp, &va, ap->a_cred);
5506 *ap->a_size = (size_t)va.va_size;
5507 } else if (ap->a_uio != NULL)
5508 error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
5511 vn_close(vp, flags, ap->a_cred, td);
/*
 * Vnode operation to remove a named attribute.
 */
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	vnode_t *xvp = NULL, *vp;
	int error;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
	    UIO_SYSSPACE, attrname, xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (error == ENOENT)
			error = ENOATTR;
		return (error);
	}

	error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	vput(nd.ni_dvp);
	if (vp == nd.ni_dvp)
		vrele(vp);
	else
		vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}
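
/*
 * Note on the ENOENT -> ENOATTR mapping above: a missing entry in the
 * hidden xattr directory means "no such attribute".  Userland expects
 * ENOATTR from extattr_delete_file(2) in that case; a raw ENOENT would
 * wrongly suggest the file itself does not exist.
 */
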
/*
 * Vnode operation to set a named attribute.
 */
static int
zfs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	struct vattr va;
	vnode_t *xvp = NULL, *vp;
	int error, flags;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR | CREATE_XATTR_DIR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	flags = FFLAGS(O_WRONLY | O_CREAT);
	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
	    xvp, td);
	error = vn_open_cred(&nd, &flags, 0600, 0, ap->a_cred, NULL);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/* Truncate any previous value, then write the new one. */
	VATTR_NULL(&va);
	va.va_size = 0;
	error = VOP_SETATTR(vp, &va, ap->a_cred);
	if (error == 0)
		error = VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);

	VOP_UNLOCK(vp, 0);
	vn_close(vp, flags, ap->a_cred, td);
	ZFS_EXIT(zfsvfs);

	return (error);
}
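
/*
 * Worked consequence of the sequence above (illustrative): because the
 * attribute file is truncated (va_size = 0) before VOP_WRITE, setting
 * an attribute always replaces the old value.  Writing a 3-byte value
 * over a previous 100-byte one leaves exactly 3 bytes, never a mix of
 * old and new contents.
 */
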
/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
zfs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrprefix[16];
	u_char dirbuf[sizeof(struct dirent)];
	struct dirent *dp;
	struct iovec aiov;
	struct uio auio, *uio = ap->a_uio;
	size_t *sizep = ap->a_size;
	size_t plen;
	vnode_t *xvp = NULL, *vp;
	int done, error, eof, pos;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
	    sizeof(attrprefix));
	if (error != 0)
		return (error);
	plen = strlen(attrprefix);

	ZFS_ENTER(zfsvfs);

	if (sizep != NULL)
		*sizep = 0;

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		/*
		 * ENOATTR means that the EA directory does not yet exist,
		 * i.e. there are no extended attributes there.
		 */
		if (error == ENOATTR)
			error = 0;
		return (error);
	}

	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
	    UIO_SYSSPACE, ".", xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;

	do {
		u_char nlen;

		aiov.iov_base = (void *)dirbuf;
		aiov.iov_len = sizeof(dirbuf);
		auio.uio_resid = sizeof(dirbuf);
		error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
		done = sizeof(dirbuf) - auio.uio_resid;
		if (error != 0)
			break;
		for (pos = 0; pos < done;) {
			dp = (struct dirent *)(dirbuf + pos);
			pos += dp->d_reclen;
			/*
			 * XXX: Temporarily we also accept DT_UNKNOWN, as this
			 * is what we get when attribute was created on
			 * Solaris.
			 */
			if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
				continue;
			if (plen == 0 &&
			    strncmp(dp->d_name, "freebsd:", 8) == 0)
				continue;
			else if (strncmp(dp->d_name, attrprefix, plen) != 0)
				continue;
			nlen = dp->d_namlen - plen;
			if (sizep != NULL)
				*sizep += 1 + nlen;
			else if (uio != NULL) {
				/*
				 * Format of extattr name entry is one byte
				 * for length and the rest for name.
				 */
				error = uiomove(&nlen, 1, uio->uio_rw, uio);
				if (error == 0) {
					error = uiomove(dp->d_name + plen,
					    nlen, uio->uio_rw, uio);
				}
				if (error != 0)
					break;
			}
		}
	} while (!eof && error == 0);

	vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}
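
/*
 * Illustrative sketch (userland, not compiled here): consuming the
 * length-prefixed list built above, as returned by
 * extattr_list_file(2).  Each entry is one length byte followed by
 * that many name bytes, with no NUL terminator.  The helper name is
 * hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/extattr.h>
#include <stdio.h>

static void
example_print_attrs(const char *path)
{
	char buf[1024];
	ssize_t len;
	int pos;

	len = extattr_list_file(path, EXTATTR_NAMESPACE_USER, buf,
	    sizeof(buf));
	for (pos = 0; pos < len;) {
		unsigned char nlen = (unsigned char)buf[pos++];

		printf("%.*s\n", (int)nlen, buf + pos);
		pos += nlen;
	}
}
#endif
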
static int
zfs_freebsd_getacl(ap)
	struct vop_getacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int		error;
	vsecattr_t	vsecattr;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
	if ((error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred,
	    NULL)) != 0)
		return (error);

	error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
	    vsecattr.vsa_aclcnt);
	if (vsecattr.vsa_aclentp != NULL)
		kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);

	return (error);
}
static int
zfs_freebsd_setacl(ap)
	struct vop_setacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int		error;
	vsecattr_t	vsecattr;
	int		aclbsize;	/* size of acl list in bytes */
	aclent_t	*aaclp;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	if (ap->a_aclp == NULL)
		return (EINVAL);

	if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
		return (EINVAL);

	/*
	 * With NFSv4 ACLs, chmod(2) may need to add additional entries,
	 * splitting every entry into two and appending "canonical six"
	 * entries at the end.  Don't allow for setting an ACL that would
	 * cause chmod(2) to run out of ACL entries.
	 */
	if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
		return (ENOSPC);

	error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
	if (error != 0)
		return (error);

	vsecattr.vsa_mask = VSA_ACE;
	aclbsize = ap->a_aclp->acl_cnt * sizeof(ace_t);
	vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
	aaclp = vsecattr.vsa_aclentp;
	vsecattr.vsa_aclentsz = aclbsize;

	aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
	error = zfs_setsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL);
	kmem_free(aaclp, aclbsize);

	return (error);
}
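
/*
 * Worked example of the limit above (illustrative): with FreeBSD's
 * ACL_MAX_ENTRIES of 254, the largest ACL this check admits holds 124
 * entries, since 124 * 2 + 6 = 254, while 125 entries would need
 * 125 * 2 + 6 = 256 slots and leave chmod(2) no room for the doubled
 * entries plus the "canonical six".
 */
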
static int
zfs_freebsd_aclcheck(ap)
	struct vop_aclcheck_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{

	return (EOPNOTSUPP);
}

static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
	vnode_t *covered_vp;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *zp = VTOZ(vp);
	enum vgetstate vs;
	int ltype;
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * If we are a snapshot mounted under .zfs, run the operation
	 * on the covered vnode.
	 */
	if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
		char name[MAXNAMLEN + 1];
		znode_t *dzp;
		size_t len;

		error = zfs_znode_parent_and_name(zp, &dzp, name);
		if (error == 0) {
			len = strlen(name);
			if (*ap->a_buflen < len)
				error = SET_ERROR(ENOMEM);
		}
		if (error == 0) {
			*ap->a_buflen -= len;
			bcopy(name, ap->a_buf + *ap->a_buflen, len);
			*ap->a_vpp = ZTOV(dzp);
		}
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	ZFS_EXIT(zfsvfs);

	covered_vp = vp->v_mount->mnt_vnodecovered;
	vs = vget_prep(covered_vp);
	ltype = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	error = vget_finish(covered_vp, LK_SHARED, vs);
	if (error == 0) {
		error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
		    ap->a_buf, ap->a_buflen);
		vput(covered_vp);
	}
	vn_lock(vp, ltype | LK_RETRY);
	if ((vp->v_iflag & VI_DOOMED) != 0)
		error = SET_ERROR(ENOENT);
	return (error);
}
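
/*
 * Illustrative walk-through of the snapshot case above (hypothetical
 * path): for the root vnode of a snapshot auto-mounted at
 * /tank/fs/.zfs/snapshot/snap there is no parent inside the snapshot
 * itself, so the reverse lookup is re-run on the covered vnode -- the
 * "snap" directory in the parent file system -- letting name-cache
 * consumers reconstruct the full path.
 */
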
#ifdef DIAGNOSTIC
static int
zfs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	vnode_t *vp;
	znode_t *zp;
	int err;

	err = vop_stdlock(ap);
	if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) {
		vp = ap->a_vp;
		zp = vp->v_data;
		if (vp->v_mount != NULL && (vp->v_iflag & VI_DOOMED) == 0 &&
		    zp != NULL && (zp->z_pflags & ZFS_XATTR) == 0)
			VERIFY(!RRM_LOCK_HELD(&zp->z_zfsvfs->z_teardown_lock));
	}
	return (err);
}
#endif

struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;

struct vop_vector zfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_access =		zfs_freebsd_access,
	.vop_allocate =		VOP_EINVAL,
	.vop_lookup =		zfs_cache_lookup,
	.vop_cachedlookup =	zfs_freebsd_lookup,
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_create =		zfs_freebsd_create,
	.vop_mknod =		zfs_freebsd_create,
	.vop_mkdir =		zfs_freebsd_mkdir,
	.vop_readdir =		zfs_freebsd_readdir,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_open =		zfs_freebsd_open,
	.vop_close =		zfs_freebsd_close,
	.vop_rmdir =		zfs_freebsd_rmdir,
	.vop_ioctl =		zfs_freebsd_ioctl,
	.vop_link =		zfs_freebsd_link,
	.vop_symlink =		zfs_freebsd_symlink,
	.vop_readlink =		zfs_freebsd_readlink,
	.vop_read =		zfs_freebsd_read,
	.vop_write =		zfs_freebsd_write,
	.vop_remove =		zfs_freebsd_remove,
	.vop_rename =		zfs_freebsd_rename,
	.vop_pathconf =		zfs_freebsd_pathconf,
	.vop_bmap =		zfs_freebsd_bmap,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getextattr =	zfs_getextattr,
	.vop_deleteextattr =	zfs_deleteextattr,
	.vop_setextattr =	zfs_setextattr,
	.vop_listextattr =	zfs_listextattr,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
	.vop_getpages =		zfs_freebsd_getpages,
	.vop_putpages =		zfs_freebsd_putpages,
	.vop_vptocnp =		zfs_vptocnp,
#ifdef DIAGNOSTIC
	.vop_lock1 =		zfs_lock,
#endif
};

struct vop_vector zfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_access =		zfs_freebsd_access,
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_read =		VOP_PANIC,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_write =		VOP_PANIC,
	.vop_pathconf =		zfs_freebsd_pathconf,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
};

/*
 * special share hidden files vnode operations template
 */
struct vop_vector zfs_shareops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		zfs_freebsd_access,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_fid =		zfs_freebsd_fid,
	.vop_pathconf =		zfs_freebsd_pathconf,
};