 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vnode.h>
#include <sys/taskq.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/sched.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
/*
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 * (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done with ZFS_ENTER(zfsvfs), which avoids races against
 *	unmount. A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
 *	can return EIO from the calling function.
 * (2)	VN_RELE() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory. Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes. Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 * (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 * (4)	If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign(). This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system. The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again. On subsequent
 *	calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 * (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks. This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 * (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 * (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lookup(&dl, ...)	// lock directory entry (may VN_HOLD())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		VN_RELE(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	VN_RELE(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(*vpp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & FAPPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (fs_vscan(*vpp, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & (FSYNC | FDSYNC))
		atomic_inc_32(&zp->z_sync_cnt);
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	/*
	 * Clean up any locks held by this process on the vp.
	 */
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());

	/* Decrement the synchronous opens in the znode */
	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(fs_vscan(vp, cr, 1) == 0);
/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
 */
static int
zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
{
	znode_t	*zp = VTOZ(vp);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == _FIO_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
	if (error == ESRCH)
		return (SET_ERROR(ENXIO));

	/*
	 * We could find a hole that begins after the logical end-of-file,
	 * because dmu_offset_next() only works on whole blocks. If the
	 * EOF falls mid-block, then indicate that the "virtual hole"
	 * at the end of the file begins at the logical EOF, rather than
	 * at the end of the last block.
	 */
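	/*
	 * A worked example of the clamp below (illustrative numbers, not
	 * from this file): with a 128K record size and file_sz == 200K,
	 * the EOF falls inside the block spanning [128K, 256K). A
	 * SEEK_HOLE request at 150K may have dmu_offset_next() report the
	 * next hole at 256K, the end of the last block; the clamp moves
	 * noff back to the logical EOF at 200K.
	 */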
	if (noff > file_sz) {
		noff = file_sz;
	}

	*off = noff;
	return (error);
}
static int
zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
    int *rvalp, caller_context_t *ct)
{
	offset_t off;
	offset_t ndata;
	dmu_object_info_t doi;
	int error;
	zfsvfs_t *zfsvfs;
	znode_t *zp;

	switch (com) {
	case _FIOFFS:
	{
		return (0);

		/*
		 * The following two ioctls are used by bfu.
		 * Faking them out is necessary to avoid bfu errors.
		 */
	}
	case _FIOGDIO:
	case _FIOSDIO:
	{
		return (0);
	}

	case _FIO_SEEK_DATA:
	case _FIO_SEEK_HOLE:
	{
#ifdef illumos
		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
			return (SET_ERROR(EFAULT));
#else
		off = *(offset_t *)data;
#endif
		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);

		/* offset parameter is in/out */
		error = zfs_holey(vp, com, &off);
		ZFS_EXIT(zfsvfs);
		if (error)
			return (error);
#ifdef illumos
		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
			return (SET_ERROR(EFAULT));
#else
		*(offset_t *)data = off;
#endif
		return (0);
	}
	case _FIO_COUNT_FILLED:
	{
		/*
		 * _FIO_COUNT_FILLED adds a new ioctl command which
		 * exposes the number of filled blocks in a
		 * ZFS object.
		 */
		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);

		/*
		 * Wait for all dirty blocks for this object
		 * to get synced out to disk, and the DMU info
		 * updated.
		 */
		error = dmu_object_wait_synced(zfsvfs->z_os, zp->z_id);
		if (error) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Retrieve fill count from DMU object.
		 */
		error = dmu_object_info(zfsvfs->z_os, zp->z_id, &doi);
		if (error) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		ndata = doi.doi_fill_count;

		ZFS_EXIT(zfsvfs);
		if (ddi_copyout(&ndata, (void *)data, sizeof (ndata), flag))
			return (SET_ERROR(EFAULT));
		return (0);
	}
	default:
		return (SET_ERROR(ENOTTY));
	}
}
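/*
 * A hypothetical userland sketch of the _FIO_COUNT_FILLED ioctl above
 * (illustrative only; it assumes an illumos-style environment where
 * <sys/filio.h> exposes _FIO_COUNT_FILLED):
 *
 *	#include <sys/filio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		offset_t ndata;
 *		int fd = open(argv[1], O_RDONLY);
 *
 *		if (fd == -1 || ioctl(fd, _FIO_COUNT_FILLED, &ndata) == -1)
 *			return (1);
 *		(void) printf("%lld filled blocks\n", (long long)ndata);
 *		return (0);
 *	}
 */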
static vm_page_t
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{
	vm_object_t obj;
	vm_page_t pp;
	int64_t end;

	/*
	 * At present vm_page_clear_dirty extends the cleared range to
	 * DEV_BSIZE aligned boundaries, if the range is not aligned. As a
	 * result a DEV_BSIZE subrange with partially dirty data may get
	 * marked as clean. It may happen that all DEV_BSIZE subranges are
	 * marked clean and thus the whole page would be considered clean
	 * despite having some dirty data. For this reason we should shrink
	 * the range to DEV_BSIZE aligned boundaries before calling
	 * vm_page_clear_dirty.
	 */
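	/*
	 * For example (DEV_BSIZE == 512, illustrative numbers): off == 100
	 * and nbytes == 1000 cover [100, 1100); the shrunken range is
	 * [512, 1024), so the partially covered 512-byte subranges at both
	 * ends keep their dirty bits.
	 */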
	end = rounddown2(off + nbytes, DEV_BSIZE);
	off = roundup2(off, DEV_BSIZE);
	nbytes = end - off;

	obj = vp->v_object;
	zfs_vmobject_assert_wlocked(obj);

	for (;;) {
		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
		    pp->valid) {
			if (vm_page_xbusied(pp)) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_reference(pp);
				vm_page_lock(pp);
				zfs_vmobject_wunlock(obj);
				vm_page_busy_sleep(pp, "zfsmwb", true);
				zfs_vmobject_wlock(obj);
				continue;
			}
			vm_page_sbusy(pp);
		} else if (pp != NULL) {
			ASSERT(!pp->valid);
			pp = NULL;
		}

		if (pp != NULL) {
			ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
			vm_object_pip_add(obj, 1);
			pmap_remove_write(pp);
			if (nbytes != 0)
				vm_page_clear_dirty(pp, off, nbytes);
		}
		break;
	}
	return (pp);
}
static void
page_unbusy(vm_page_t pp)
{

	vm_page_sunbusy(pp);
	vm_object_pip_subtract(pp->object, 1);
}
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
	vm_object_t obj;
	vm_page_t pp;

	obj = vp->v_object;
	zfs_vmobject_assert_wlocked(obj);

	for (;;) {
		if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
		    pp->valid) {
			if (vm_page_xbusied(pp)) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_reference(pp);
				vm_page_lock(pp);
				zfs_vmobject_wunlock(obj);
				vm_page_busy_sleep(pp, "zfsmwb", true);
				zfs_vmobject_wlock(obj);
				continue;
			}

			ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
			vm_page_lock(pp);
			vm_page_hold(pp);
			vm_page_unlock(pp);
		} else
			pp = NULL;
		break;
	}
	return (pp);
}
static void
page_unhold(vm_page_t pp)
{

	vm_page_lock(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
}
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
static void
update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
    int segflg, dmu_tx_t *tx)
{
	vm_object_t obj;
	struct sf_buf *sf;
	caddr_t va;
	int off;
	ASSERT(segflg != UIO_NOCOPY);
	ASSERT(vp->v_mount != NULL);
	obj = vp->v_object;
	ASSERT(obj != NULL);

	off = start & PAGEOFFSET;
	zfs_vmobject_wlock(obj);
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		vm_page_t pp;
		int nbytes = imin(PAGESIZE - off, len);

		if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
			zfs_vmobject_wunlock(obj);

			va = zfs_map_page(pp, &sf);
			(void) dmu_read(os, oid, start + off, nbytes,
			    va + off, DMU_READ_PREFETCH);
			zfs_unmap_page(sf);

			zfs_vmobject_wlock(obj);
			page_unbusy(pp);
		}
		len -= nbytes;
		off = 0;
	}
	vm_object_pip_wakeupn(obj, 0);
	zfs_vmobject_wunlock(obj);
}
/*
 * Read with UIO_NOCOPY flag means that sendfile(2) requests
 * ZFS to populate a range of page cache pages with data.
 *
 * NOTE: this function could be optimized to pre-allocate
 * all pages in advance, drain exclusive busy on all of them,
 * map them into contiguous KVA region and populate them
 * in one single dmu_read() call.
 */
static int
mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;
	struct sf_buf *sf;
	vm_object_t obj;
	vm_page_t pp;
	int64_t start;
	caddr_t va;
	int len = nbytes;
	int error = 0;

	ASSERT(uio->uio_segflg == UIO_NOCOPY);
	ASSERT(vp->v_mount != NULL);
	obj = vp->v_object;
	ASSERT(obj != NULL);
	ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);

	zfs_vmobject_wlock(obj);
	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
		int bytes = MIN(PAGESIZE, len);

		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_SBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
		if (pp->valid == 0) {
			zfs_vmobject_wunlock(obj);
			va = zfs_map_page(pp, &sf);
			error = dmu_read(os, zp->z_id, start, bytes, va,
			    DMU_READ_PREFETCH);
			if (bytes != PAGESIZE && error == 0)
				bzero(va + bytes, PAGESIZE - bytes);
			zfs_unmap_page(sf);
			zfs_vmobject_wlock(obj);
			vm_page_sunbusy(pp);
			vm_page_lock(pp);
			if (error) {
				if (pp->wire_count == 0 && pp->valid == 0 &&
				    !vm_page_busied(pp))
					vm_page_free(pp);
			} else {
				pp->valid = VM_PAGE_BITS_ALL;
				vm_page_activate(pp);
			}
			vm_page_unlock(pp);
		} else {
			ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
			vm_page_sunbusy(pp);
		}
		if (error)
			break;
		uio->uio_resid -= bytes;
		uio->uio_offset += bytes;
		len -= bytes;
	}
	zfs_vmobject_wunlock(obj);
	return (error);
}
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages. What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		otherwise we fall back to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
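/*
 * For example (illustrative numbers): a 6000-byte read at offset 100
 * with PAGESIZE == 4096 is split into two uiomoves: 3996 bytes from
 * the page covering [0, 4096) and 2004 bytes from the page covering
 * [4096, 8192); each chunk is served from the page cache if the page
 * is resident, else from the DMU.
 */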
static int
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	vm_object_t obj;
	int64_t start;
	int len = nbytes;
	int off;
	int error = 0;

	ASSERT(vp->v_mount != NULL);
	obj = vp->v_object;
	ASSERT(obj != NULL);

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	zfs_vmobject_wlock(obj);
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
		vm_page_t pp;
		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_hold(vp, start)) {
			struct sf_buf *sf;
			caddr_t va;

			zfs_vmobject_wunlock(obj);
			va = zfs_map_page(pp, &sf);
#ifdef illumos
			error = uiomove(va + off, bytes, UIO_READ, uio);
#else
			error = vn_io_fault_uiomove(va + off, bytes, uio);
#endif
			zfs_unmap_page(sf);
			zfs_vmobject_wlock(obj);
			page_unhold(pp);
		} else {
			zfs_vmobject_wunlock(obj);
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes);
			zfs_vmobject_wlock(obj);
		}
		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	zfs_vmobject_wunlock(obj);
	return (error);
}
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	vp	- vnode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Side Effects:
 *	vp - atime updated if byte count > 0
 */
/* ARGSUSED */
static int
zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	ssize_t		n, nbytes;
	int		error = 0;
	rl_t		*rl;
	xuio_t		*xuio = NULL;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(vp, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */
	if (zfsvfs->z_log &&
	    (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {
		error = 0;
		goto out;
	}

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
		int nblk;
		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;
		if ((ISP2(blksz))) {
			nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
			    blksz)) / blksz;
		} else {
			ASSERT(offset + n <= blksz);
			nblk = 1;
		}
		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(vp)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
				    blksz), 0, blksz);
			}
		}
	}

	while (n > 0) {
		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
#ifdef __FreeBSD__
		if (uio->uio_segflg == UIO_NOCOPY)
			error = mappedread_sf(vp, nbytes, uio);
		else
#endif /* __FreeBSD__ */
		if (vn_has_cached_data(vp)) {
			error = mappedread(vp, nbytes, uio);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes);
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}

		n -= nbytes;
	}
out:
	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Write the bytes to a file.
 *
 *	IN:	vp	- vnode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND, FSYNC, and/or FDSYNC. FAPPEND is
 *			  set if in append mode.
 *		cr	- credentials of caller.
 *		ct	- caller context (NFS/CIFS fem monitor only)
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	vp - ctime|mtime updated if byte count > 0
 */
/* ARGSUSED */
static int
zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	rlim64_t	limit = MAXOFFSET_T;
	ssize_t		start_resid = uio->uio_resid;
	ssize_t		tx_bytes;
	uint64_t	end_size;
	dmu_tx_t	*tx;
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	zilog_t		*zilog;
	offset_t	woff;
	ssize_t		n, nbytes;
	rl_t		*rl;
	int		max_blksz = zfsvfs->z_max_blksz;
	int		error = 0;
	arc_buf_t	*abuf;
	iovec_t		*aiov = NULL;
	xuio_t		*xuio = NULL;
	int		i_iov = 0;
	int		iovcnt = uio->uio_iovcnt;
	iovec_t		*iovp = uio->uio_iov;
	int		write_eof;
	int		count = 0;
	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];
	/*
	 * Fasttrack empty write
	 */
	n = start_resid;
	if (n == 0)
		return (0);

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
		limit = MAXOFFSET_T;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	/*
	 * When vp->v_vfsp != zp->z_zfsvfs->z_vfs (e.g. for snapshots), our
	 * callers might not be able to detect properly that we are read-only,
	 * so check it explicitly here.
	 */
	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EROFS));
	}
	/*
	 * If immutable or not appending then return EPERM.
	 * Intentionally allow ZFS_READONLY through here.
	 * See zfs_zaccess_common().
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	zilog = zfsvfs->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}
	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
		uio_prefaultpages(MIN(n, max_blksz), uio);
	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics. We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
		woff = rl->r_off;
		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (EFBIG);
	}
	if (woff >= limit) {
		zfs_range_unlock(rl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);
	/*
	 * Write the file in reasonable size chunks. Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
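	/*
	 * For example (illustrative numbers): with a 128K recordsize, a
	 * 1 MB write at a block-aligned offset is carried out as eight
	 * loop iterations, each with its own DMU transaction, a write of
	 * up to max_blksz bytes, and its own intent log record.
	 */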
	while (n > 0) {
		abuf = NULL;
		woff = uio->uio_loffset;
		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = SET_ERROR(EDQUOT);
			break;
		}

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
			    iovec_t *, aiov, arc_buf_t *, abuf);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block. "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction. This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			size_t cbytes;

			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
#ifdef illumos
			if (error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes)) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
#else
			ssize_t resid = uio->uio_resid;
			error = vn_io_fault_uiomove(abuf->b_data, max_blksz, uio);
			if (error != 0) {
				uio->uio_offset -= resid - uio->uio_resid;
				uio->uio_resid = resid;
				dmu_return_arcbuf(abuf);
				break;
			}
#endif
		}
		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}
		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range. This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				/*
				 * File's blocksize is already larger than the
				 * "recordsize" property. Only let it grow to
				 * the next power of 2.
				 */
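				/*
				 * For example (illustrative numbers): if an
				 * older, larger recordsize left z_blksz at
				 * 96K while the property is now 64K, then
				 * highbit64(96K) == 17 and the blocksize may
				 * grow to at most MIN(end_size, 1 << 17),
				 * i.e. 128K.
				 */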
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size,
				    1 << highbit64(zp->z_blksz));
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}
		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (woff + nbytes > zp->z_size)
			vnode_pager_setsize(vp, woff + nbytes);
		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf(). Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && vn_has_cached_data(vp)) {
			update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
			    zp->z_id, uio->uio_segflg, tx);
		}

		/*
		 * If we made no progress, we're done. If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}
		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(vp, cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;
			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);
		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
#ifdef illumos
			ASSERT(error == 0);
#else
			ASSERT(error == 0 || error == EFAULT);
#endif
		}
		/*
		 * If we are replaying and eof is non-zero then force
		 * the file size to the specified eof. Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		if (error == 0)
			error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		else
			(void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
		dmu_tx_commit(tx);

		if (error != 0)
			break;
		ASSERT(tx_bytes == nbytes);
		n -= nbytes;

		if (!xuio && n > 0)
			uio_prefaultpages(MIN(n, max_blksz), uio);
	}
	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * EFAULT means that at least one page of the source buffer was not
	 * available. VFS will re-try remaining I/O upon this error.
	 */
	if (error == EFAULT) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);

	ZFS_EXIT(zfsvfs);
	return (0);
}
static void
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = zp->z_zfsvfs->z_os;

	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
#ifdef DEBUG
static int zil_fault_io = 0;
#endif

/*
 * Get data to generate a TX_WRITE intent log record.
 */
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;
	znode_t *zp;
	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error = 0;

	ASSERT3P(lwb, !=, NULL);
	ASSERT3P(zio, !=, NULL);
	ASSERT3U(size, !=, 0);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		VN_RELE_ASYNC(ZTOV(zp),
		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_lwb = lwb;
	zgd->zgd_private = zp;
	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
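	/*
	 * A rough sketch of how the flavor is chosen (this happens in
	 * zfs_log_write(), not here; the tunable zfs_immediate_write_sz
	 * and the exact policy below come from the wider ZIL code and are
	 * shown here only for orientation, as an assumption):
	 *
	 *	if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	 *		wr_state = WR_INDIRECT;	// sync to final location
	 *	else if (!spa_has_slogs(zilog->zl_spa) &&
	 *	    resid >= zfs_immediate_write_sz)
	 *		wr_state = WR_INDIRECT;
	 *	else if (ioflag & (FSYNC | FDSYNC))
	 *		wr_state = WR_COPIED;	// data embedded in the record
	 *	else
	 *		wr_state = WR_NEED_COPY;	// copied at commit time
	 */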
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data. We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
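		/*
		 * For example (illustrative numbers): with z_blksz == 128K
		 * and lr_offset == 300K, blkoff below is P2PHASE(300K, 128K)
		 * == 44K, so the range lock covers the whole block
		 * [256K, 384K). If the blocksize changed while we slept on
		 * the lock, we drop the lock and retry with the new size.
		 */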
		for (;;) {
			uint64_t blkoff;
			size = zp->z_blksz;
			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
			offset -= blkoff;
			zgd->zgd_rl = zfs_range_lock(zp, offset, size,
			    RL_READER);
			if (zp->z_blksz == size)
				break;
			offset += blkoff;
			zfs_range_unlock(zgd->zgd_rl);
		}
		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);
#ifdef DEBUG
		if (zil_fault_io) {
			error = SET_ERROR(EIO);
			zil_fault_io = 0;
		}
#endif
		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *bp = &lr->lr_blkptr;

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= size);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf. We will finish everything up
			 * in the zfs_get_done() callback.
			 */
			if (error == 0)
				return (0);

			if (error == EALREADY) {
				lr->lr_common.lrc_txtype = TX_WRITE2;
				error = 0;
			}
		}
	}

	zfs_get_done(zgd, error);

	return (error);
}
static int
zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

	ZFS_EXIT(zfsvfs);
	return (error);
}
static int
zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
{
	int error;

	*vpp = arg;
	error = vn_lock(*vpp, lkflags);
	if (error != 0)
		vrele(*vpp);
	return (error);
}
static int
zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
{
	znode_t *zdp = VTOZ(dvp);
	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
	int error;
	int ltype;

	ASSERT_VOP_LOCKED(dvp, __func__);
	if ((zdp->z_pflags & ZFS_XATTR) == 0)
		VERIFY(!RRM_LOCK_HELD(&zfsvfs->z_teardown_lock));

	if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
		ASSERT3P(dvp, ==, vp);
		vref(dvp);
		ltype = lkflags & LK_TYPE_MASK;
		if (ltype != VOP_ISLOCKED(dvp)) {
			if (ltype == LK_EXCLUSIVE)
				vn_lock(dvp, LK_UPGRADE | LK_RETRY);
			else /* if (ltype == LK_SHARED) */
				vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);

			/*
			 * Relock for the "." case could leave us with
			 * reclaimed vnode.
			 */
			if (dvp->v_iflag & VI_DOOMED) {
				vrele(dvp);
				return (SET_ERROR(ENOENT));
			}
		}
		return (0);
	} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
		/*
		 * Note that in this case, dvp is the child vnode, and we
		 * are looking up the parent vnode - exactly reverse from
		 * normal operation. Unlocking dvp requires some rather
		 * tricky unlock/relock dance to prevent mp from being freed;
		 * use vn_vget_ino_gen() which takes care of all that.
		 *
		 * XXX Note that there is a time window when both vnodes are
		 * unlocked. It is possible, although highly unlikely, that
		 * during that window the parent-child relationship between
		 * the vnodes may change, for example, get reversed.
		 * In that case we would have a wrong lock order for the
		 * vnodes. All other filesystems seem to ignore this problem,
		 * so we do the same here.
		 * A potential solution could be implemented as follows:
		 * - using LK_NOWAIT when locking the second vnode and retrying
		 *   if necessary
		 * - checking that the parent-child relationship still holds
		 *   after locking both vnodes and retrying if it doesn't
		 */
		error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
		return (error);
	} else {
		error = vn_lock(vp, lkflags);
		if (error != 0)
			vrele(vp);
		return (error);
	}
}
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held vnode reference for it.
 *
 *	IN:	dvp	- vnode of directory to search.
 *		nm	- name of entry to lookup.
 *		pnp	- full pathname to lookup [UNUSED].
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		rdir	- root directory vnode [UNUSED].
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	vpp	- vnode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dvp - atime updated
 */
/* ARGSUSED */
static int
zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
    int nameiop, cred_t *cr, kthread_t *td, int flags)
{
	znode_t *zdp = VTOZ(dvp);
	znode_t *zp;
	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed in name. This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & LOOKUP_XATTR)) {
		if (dvp->v_type != VDIR) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}
	}

	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*vpp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
		    B_FALSE, cr)) {
			vrele(*vpp);
			*vpp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Check accessibility of directory.
	 */
	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	/*
	 * First handle the special cases.
	 */
	if ((cnp->cn_flags & ISDOTDOT) != 0) {
		/*
		 * If we are a snapshot mounted under .zfs, return
		 * the vp for the snapshot directory.
		 */
		if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
			struct componentname cn;
			vnode_t *zfsctl_vp;
			int ltype;

			ZFS_EXIT(zfsvfs);
			ltype = VOP_ISLOCKED(dvp);
			VOP_UNLOCK(dvp, 0);
			error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
			    &zfsctl_vp);
			if (error == 0) {
				cn.cn_nameptr = "snapshot";
				cn.cn_namelen = strlen(cn.cn_nameptr);
				cn.cn_nameiop = cnp->cn_nameiop;
				cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
				cn.cn_lkflags = cnp->cn_lkflags;
				error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
				vput(zfsctl_vp);
			}
			vn_lock(dvp, ltype | LK_RETRY);
			return (error);
		}
	}
	if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
		ZFS_EXIT(zfsvfs);
		if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
			return (SET_ERROR(ENOTSUP));
		error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
		return (error);
	}
	/*
	 * The loop retries the lookup if the parent-child relationship
	 * changes during the dot-dot locking complexities.
	 */
	for (;;) {
		uint64_t parent;

		error = zfs_dirlook(zdp, nm, &zp);
		if (error == 0)
			*vpp = ZTOV(zp);

		ZFS_EXIT(zfsvfs);
		if (error != 0)
			break;

		error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
		if (error != 0) {
			/*
			 * If we've got a locking error, then the vnode
			 * got reclaimed because of a force unmount.
			 * We never enter doomed vnodes into the name cache.
			 */
			*vpp = NULL;
			return (error);
		}

		if ((cnp->cn_flags & ISDOTDOT) == 0)
			break;

		ZFS_ENTER(zfsvfs);
		if (zdp->z_sa_hdl == NULL) {
			error = SET_ERROR(EIO);
		} else {
			error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
			    &parent, sizeof (parent));
		}
		if (error != 0) {
			ZFS_EXIT(zfsvfs);
			vput(ZTOV(zp));
			break;
		}
		if (zp->z_id == parent) {
			ZFS_EXIT(zfsvfs);
			break;
		}
		vput(ZTOV(zp));
	}
	/* Translate errors and add SAVENAME when needed. */
	if (cnp->cn_flags & ISLASTCN) {
		switch (nameiop) {
		case CREATE:
		case RENAME:
			if (error == ENOENT) {
				error = EJUSTRETURN;
				cnp->cn_flags |= SAVENAME;
				break;
			}
			/* FALLTHROUGH */
		case DELETE:
			if (error == 0)
				cnp->cn_flags |= SAVENAME;
			break;
		}
	}

	/* Insert name into cache (as non-existent) if appropriate. */
	if (zfsvfs->z_use_namecache &&
	    error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
		cache_enter(dvp, NULL, cnp);

	/* Insert name into cache if appropriate. */
	if (zfsvfs->z_use_namecache &&
	    error == 0 && (cnp->cn_flags & MAKEENTRY)) {
		if (!(cnp->cn_flags & ISLASTCN) ||
		    (nameiop != DELETE && nameiop != RENAME)) {
			cache_enter(dvp, *vpp, cnp);
		}
	}

	return (error);
}
/*
 * Attempt to create a new entry in a directory. If the entry
 * already exists, truncate the file if permissible, else return
 * an error. Return the vp of the created or trunc'd file.
 *
 *	IN:	dvp	- vnode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		ct	- caller context
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dvp - ctime|mtime updated if new entry created
 *	vp - ctime|mtime always, atime if new
 */
/* ARGSUSED */
static int
zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
    vnode_t **vpp, cred_t *cr, kthread_t *td)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	objset_t	*os;
	dmu_tx_t	*tx;
	int		error;
	ksid_t		*ksid;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	void		*vsecp = NULL;
	int		flag = 0;
	uint64_t	txtype;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);
	else
		uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || (vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	*vpp = NULL;

	if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
		vap->va_mode &= ~S_ISVTX;
	error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
	if (error) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	ASSERT3P(zp, ==, NULL);

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
		goto out;
	}

	/*
	 * We only support the creation of regular files in
	 * extended attribute directories.
	 */

	if ((dzp->z_pflags & ZFS_XATTR) &&
	    (vap->va_type != VREG)) {
		error = SET_ERROR(EINVAL);
		goto out;
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids)) != 0)
		goto out;

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}
	getnewvnode_reserve(1);

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
	dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		getnewvnode_drop_reserve();
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
	txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
	zfs_log_create(zilog, tx, txtype, dzp, zp, name,
	    vsecp, acl_ids.z_fuidp, vap);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	getnewvnode_drop_reserve();

out:
	if (error == 0) {
		*vpp = ZTOV(zp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Remove an entry from a directory.
 *
 *	IN:	dvp	- vnode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dvp - ctime|mtime
 *	vp - ctime (if nlink > 0)
 */
/*ARGSUSED*/
static int
zfs_remove(vnode_t *dvp, vnode_t *vp, char *name, cred_t *cr)
{
	znode_t		*dzp = VTOZ(dvp);
	znode_t		*zp = VTOZ(vp);
	znode_t		*xzp;
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	uint64_t	acl_obj, xattr_obj;
	uint64_t	obj = 0;
	dmu_tx_t	*tx;
	boolean_t	unlinked, toobig = FALSE;
	uint64_t	txtype;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	ZFS_VERIFY_ZP(zp);
	zilog = zfsvfs->z_log;

	xattr_obj = 0;
	xzp = NULL;

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (vp->v_type == VDIR) {
		error = SET_ERROR(EPERM);
		goto out;
	}

	vnevent_remove(vp, dvp, name, ct);

	obj = zp->z_id;

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT0(error);
	}
	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the vnode. So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);

	if (xzp) {
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	/*
	 * Mark this transaction as typically resulting in a net free of space
	 */
	dmu_tx_mark_netfree(tx);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		zfs_unlinked_add(zp, tx);
		vp->v_vflag |= VV_NOSYNC;
	}

	txtype = TX_REMOVE;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	dmu_tx_commit(tx);
out:
	if (xzp)
		vrele(ZTOV(xzp));

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Create a new directory and insert it into dvp using the name
 * provided. Return a pointer to the inserted directory.
 *
 *	IN:	dvp	- vnode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created directory.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dvp - ctime|mtime updated
 *	vp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
static int
zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	ksid_t		*ksid;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;

	ASSERT(vap->va_type == VDIR);

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);
	else
		uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    ((vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    NULL, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
	*vpp = NULL;

	if (error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW)) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	ASSERT3P(zp, ==, NULL);

	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EDQUOT));
	}
	/*
	 * Add a new entry to the directory.
	 */
	getnewvnode_reserve(1);
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		getnewvnode_drop_reserve();
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);

	*vpp = ZTOV(zp);

	txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	getnewvnode_drop_reserve();

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}
/*
 * Remove a directory subdir entry. If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dvp	- vnode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- vnode of current working directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dvp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
zfs_rmdir(vnode_t *dvp, vnode_t *vp, char *name, cred_t *cr)
{
	znode_t		*dzp = VTOZ(dvp);
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zilog_t		*zilog;
	dmu_tx_t	*tx;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	ZFS_VERIFY_ZP(zp);
	zilog = zfsvfs->z_log;

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
		goto out;
	}

	if (vp->v_type != VDIR) {
		error = SET_ERROR(ENOTDIR);
		goto out;
	}

	vnevent_rmdir(vp, dvp, name, ct);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	dmu_tx_commit(tx);

	cache_purge(dvp);
out:
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/*
 * Read as many directory entries as will fit into the provided
 * buffer from the given directory cursor position (specified in
 * the uio structure).
 *
 *	IN:	vp	- vnode of directory to read.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *		eofp	- set to true if end-of-file detected.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	vp - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
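/*
 * Concretely: offsets 0, 1, and (at the filesystem root) 2 are
 * synthesized below for '.', '..', and '.zfs', while every real entry
 * is addressed by a serialized zap cursor, whose low 4 bits are zero,
 * so the two cookie ranges cannot collide.
 */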
/* ARGSUSED */
static int
zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
    int *ncookies, u_long **cookies)
{
	znode_t		*zp = VTOZ(vp);
	iovec_t		*iovp;
	edirent_t	*eodp;
	dirent64_t	*odp;
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	objset_t	*os;
	caddr_t		outbuf;
	size_t		bufsize;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	uint_t		bytes_wanted;
	uint64_t	offset; /* must be unsigned; checks for < 1 */
	uint64_t	parent;
	int		local_eof;
	int		outcount;
	int		error;
	uint8_t		prefetch;
	boolean_t	check_sysattrs;
	uint8_t		type;
	int		ncooks;
	u_long		*cooks = NULL;
	int		flags = 0;
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * If we are not given an eof variable,
	 * use a local one.
	 */
	if (eofp == NULL)
		eofp = &local_eof;

	/*
	 * Check for valid iov_len.
	 */
	if (uio->uio_iov->iov_len <= 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Quit if directory has been removed (posix)
	 */
	if ((*eofp = zp->z_unlinked) != 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}
	error = 0;
	os = zfsvfs->z_os;
	offset = uio->uio_loffset;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}
	/*
	 * Get space to change directory entries into fs independent format.
	 */
	iovp = uio->uio_iov;
	bytes_wanted = iovp->iov_len;
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
		bufsize = bytes_wanted;
		outbuf = kmem_alloc(bufsize, KM_SLEEP);
		odp = (struct dirent64 *)outbuf;
	} else {
		bufsize = bytes_wanted;
		outbuf = NULL;
		odp = (struct dirent64 *)iovp->iov_base;
	}
	eodp = (struct edirent *)odp;

	if (ncookies != NULL) {
		/*
		 * Minimum entry size is dirent size and 1 byte for a file
		 * name.
		 */
		ncooks = uio->uio_resid / (sizeof (struct dirent) -
		    sizeof (((struct dirent *)NULL)->d_name) + 1);
		cooks = malloc(ncooks * sizeof (u_long), M_TEMP, M_WAITOK);
		*cookies = cooks;
		*ncookies = ncooks;
	}
	/*
	 * If this VFS supports the system attribute view interface; and
	 * we're looking at an extended attribute directory; and we care
	 * about normalization conflicts on this vfs; then we must check
	 * for normalization conflicts with the sysattr name space.
	 */
	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
	    (flags & V_RDDIR_ENTFLAGS);
	/*
	 * Transform to file-system independent format
	 */
	outcount = 0;
	while (outcount < bytes_wanted) {
		ino64_t objnum;
		ushort_t reclen;
		off64_t *next = NULL;

		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
			type = DT_DIR;
		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
			type = DT_DIR;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
			type = DT_DIR;
		} else {
			/*
			 * Grab next entry.
			 */
			if (error = zap_cursor_retrieve(&zc, &zap)) {
				if ((*eofp = (error == ENOENT)) != 0)
					break;
				else
					goto update;
			}

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset);
				error = SET_ERROR(ENXIO);
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			/*
			 * MacOS X can extract the object type here such as:
			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
			 */
			type = ZFS_DIRENT_TYPE(zap.za_first_integer);

			if (check_sysattrs && !zap.za_normalization_conflict) {
#ifdef TODO
				zap.za_normalization_conflict =
				    xattr_sysattr_casechk(zap.za_name);
#else
				panic("%s:%u: TODO", __func__, __LINE__);
#endif
			}
		}
		if (flags & V_RDDIR_ACCFILTER) {
			/*
			 * If we have no access at all, don't include
			 * this entry in the returned information.
			 */
			znode_t	*ezp;
			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
				goto skip_entry;
			if (!zfs_has_access(ezp, cr)) {
				vrele(ZTOV(ezp));
				goto skip_entry;
			}
			vrele(ZTOV(ezp));
		}

		if (flags & V_RDDIR_ENTFLAGS)
			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
		else
			reclen = DIRENT64_RECLEN(strlen(zap.za_name));
		/*
		 * Will this entry fit in the buffer?
		 */
		if (outcount + reclen > bufsize) {
			/*
			 * Did we manage to fit anything in the buffer?
			 */
			if (!outcount) {
				error = SET_ERROR(EINVAL);
				goto update;
			}
			break;
		}
		if (flags & V_RDDIR_ENTFLAGS) {
			/*
			 * Add extended flag entry:
			 */
			eodp->ed_ino = objnum;
			eodp->ed_reclen = reclen;
			/* NOTE: ed_off is the offset for the *next* entry */
			next = &(eodp->ed_off);
			eodp->ed_eflags = zap.za_normalization_conflict ?
			    ED_CASE_CONFLICT : 0;
			(void) strncpy(eodp->ed_name, zap.za_name,
			    EDIRENT_NAMELEN(reclen));
			eodp = (edirent_t *)((intptr_t)eodp + reclen);
		} else {
			/*
			 * Add normal entry:
			 */
			odp->d_ino = objnum;
			odp->d_reclen = reclen;
			odp->d_namlen = strlen(zap.za_name);
			(void) strlcpy(odp->d_name, zap.za_name,
			    odp->d_namlen + 1);
			odp->d_type = type;
			odp = (dirent64_t *)((intptr_t)odp + reclen);
		}
		outcount += reclen;
		ASSERT(outcount <= bufsize);

		/* Prefetch znode */
		if (prefetch)
			dmu_prefetch(os, objnum, 0, 0, 0,
			    ZIO_PRIORITY_SYNC_READ);

	skip_entry:
		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}

		/* Fill the offset right after advancing the cursor. */
		if (next != NULL)
			*next = offset;
		if (cooks != NULL) {
			*cooks++ = offset;
			ncooks--;
			KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
		}
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
	/* Subtract unused cookies */
	if (ncookies != NULL)
		*ncookies -= ncooks;

	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
		iovp->iov_base += outcount;
		iovp->iov_len -= outcount;
		uio->uio_resid -= outcount;
	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
		/*
		 * Reset the pointer.
		 */
		offset = uio->uio_loffset;
	}

update:
	zap_cursor_fini(&zc);
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
		kmem_free(outbuf, bufsize);

	if (error == ENOENT)
		error = 0;

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);

	uio->uio_loffset = offset;

	ZFS_EXIT(zfsvfs);

	if (error != 0 && cookies != NULL) {
		free(*cookies, M_TEMP);
		*cookies = NULL;
		*ncookies = 0;
	}
	return (error);
}
ulong_t zfs_fsync_sync_cnt = 4;

static int
zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);
	}
	return (0);
}
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	vp	- vnode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If AT_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds).
 */
/* ARGSUSED */
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int	error = 0;
	uint32_t blksize;
	u_longlong_t nblocks;
	uint64_t links;
	uint64_t mtime[2], ctime[2], crtime[2], rdev;
	xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
	xoptattr_t *xoap = NULL;
	boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	sa_bulk_attr_t bulk[4];
	int count = 0;
	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
		    &rdev, 8);

	if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
	 * Also, if we are the owner don't bother, since owner should
	 * always be allowed to read basic attributes of file.
	 */
	if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
	    (vap->va_uid != crgetuid(cr))) {
		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
		    skipaclchk, cr)) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}
	/*
	 * Return all attributes. It's cheaper to provide the answer
	 * than to determine whether we were asked the question.
	 */

	vap->va_type = IFTOVT(zp->z_mode);
	vap->va_mode = zp->z_mode & ~S_IFMT;
#ifdef illumos
	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
#else
	vn_fsid(vp, vap);
#endif
	vap->va_nodeid = zp->z_id;
	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
		links = zp->z_links + 1;
	else
		links = zp->z_links;
	vap->va_nlink = MIN(links, LINK_MAX);	/* nlink_t limit! */
	vap->va_size = zp->z_size;
#ifdef illumos
	vap->va_rdev = vp->v_rdev;
#else
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		vap->va_rdev = zfs_cmpldev(rdev);
#endif
	vap->va_seq = zp->z_seq;
	vap->va_flags = 0;	/* FreeBSD: Reset chflags(2) flags. */
	vap->va_filerev = zp->z_seq;
	/*
	 * Add in any requested optional attributes and the create time.
	 * Also set the corresponding bits in the returned attribute bitmap.
	 */
	if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
		if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
			xoap->xoa_archive =
			    ((zp->z_pflags & ZFS_ARCHIVE) != 0);
			XVA_SET_RTN(xvap, XAT_ARCHIVE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
			xoap->xoa_readonly =
			    ((zp->z_pflags & ZFS_READONLY) != 0);
			XVA_SET_RTN(xvap, XAT_READONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
			xoap->xoa_system =
			    ((zp->z_pflags & ZFS_SYSTEM) != 0);
			XVA_SET_RTN(xvap, XAT_SYSTEM);
		}

		if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
			xoap->xoa_hidden =
			    ((zp->z_pflags & ZFS_HIDDEN) != 0);
			XVA_SET_RTN(xvap, XAT_HIDDEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			xoap->xoa_nounlink =
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0);
			XVA_SET_RTN(xvap, XAT_NOUNLINK);
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			xoap->xoa_immutable =
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
			XVA_SET_RTN(xvap, XAT_IMMUTABLE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			xoap->xoa_appendonly =
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0);
			XVA_SET_RTN(xvap, XAT_APPENDONLY);
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			xoap->xoa_nodump =
			    ((zp->z_pflags & ZFS_NODUMP) != 0);
			XVA_SET_RTN(xvap, XAT_NODUMP);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
			xoap->xoa_opaque =
			    ((zp->z_pflags & ZFS_OPAQUE) != 0);
			XVA_SET_RTN(xvap, XAT_OPAQUE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			xoap->xoa_av_quarantined =
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			xoap->xoa_av_modified =
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
			XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
		    vp->v_type == VREG) {
			zfs_sa_get_scanstamp(zp, xvap);
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_REPARSE);
		}
		if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
			xoap->xoa_generation = zp->z_gen;
			XVA_SET_RTN(xvap, XAT_GEN);
		}

		if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
			xoap->xoa_offline =
			    ((zp->z_pflags & ZFS_OFFLINE) != 0);
			XVA_SET_RTN(xvap, XAT_OFFLINE);
		}

		if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
			xoap->xoa_sparse =
			    ((zp->z_pflags & ZFS_SPARSE) != 0);
			XVA_SET_RTN(xvap, XAT_SPARSE);
		}
	}
2827 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2828 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2829 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2830 ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
2833 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2834 vap->va_blksize = blksize;
2835 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
2837 if (zp->z_blksz == 0) {
2839 * Block size hasn't been set; suggest maximal I/O transfers.
2841 vap->va_blksize = zfsvfs->z_max_blksz;
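/*
 * Usage sketch (hypothetical caller, mirroring zfs_freebsd_getattr()
 * below): optional attributes are requested by wrapping the vattr in
 * an xvattr_t, setting AT_XVATTR plus the per-attribute request bits,
 * and then testing the returned bitmap:
 *
 *	xvattr_t xva;
 *
 *	xva_init(&xva);
 *	xva.xva_vattr.va_mask = AT_MODE | AT_XVATTR;
 *	XVA_SET_REQ(&xva, XAT_READONLY);
 *	if (zfs_getattr(vp, (vattr_t *)&xva, 0, cr, NULL) == 0 &&
 *	    XVA_ISSET_RTN(&xva, XAT_READONLY))
 *		readonly = xva.xva_xoptattrs.xoa_readonly;
 */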
2849 * Set the file attributes to the values contained in the vattr structure.
2852 * IN: vp - vnode of file to be modified.
2853 * vap - new attribute values.
2854 * If AT_XVATTR set, then optional attrs are being set
2855 * flags - ATTR_UTIME set if non-default time values provided.
2856 * - ATTR_NOACLCHECK (CIFS context only).
2857 * cr - credentials of caller.
2858 * ct - caller context
2860 * RETURN: 0 on success, error code on failure.
2863 * vp - ctime updated, mtime updated if size changed.
2867 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2868 caller_context_t *ct)
2870 znode_t *zp = VTOZ(vp);
2871 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2876 uint_t mask = vap->va_mask;
2877 uint_t saved_mask = 0;
2878 uint64_t saved_mode;
2881 uint64_t new_uid, new_gid;
2883 uint64_t mtime[2], ctime[2];
2885 int need_policy = FALSE;
2887 zfs_fuid_info_t *fuidp = NULL;
2888 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2891 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2892 boolean_t fuid_dirtied = B_FALSE;
2893 sa_bulk_attr_t bulk[7], xattr_bulk[7];
2894 int count = 0, xattr_count = 0;
2899 if (mask & AT_NOSET)
2900 return (SET_ERROR(EINVAL));
2905 zilog = zfsvfs->z_log;
2908 * Make sure that if we have an ephemeral uid/gid or an xvattr specified,
2909 * the file system is at the proper version level.
2912 if (zfsvfs->z_use_fuids == B_FALSE &&
2913 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2914 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2915 (mask & AT_XVATTR))) {
2917 return (SET_ERROR(EINVAL));
2920 if (mask & AT_SIZE && vp->v_type == VDIR) {
2922 return (SET_ERROR(EISDIR));
2925 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
2927 return (SET_ERROR(EINVAL));
2931 * If this is an xvattr_t, then get a pointer to the structure of
2932 * optional attributes. If this is NULL, then we have a vattr_t.
2934 xoap = xva_getxoptattr(xvap);
2936 xva_init(&tmpxvattr);
2939 * On immutable files, only the immutable bit and atime may be altered.
2941 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2942 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
2943 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2945 return (SET_ERROR(EPERM));
2949 * Note: ZFS_READONLY is handled in zfs_zaccess_common.
2953 * Verify that the timestamps don't overflow 32 bits.
2954 * ZFS can handle large timestamps, but 32-bit syscalls can't
2955 * handle times beyond January 2038. This check should be removed
2956 * once large timestamps are fully supported.
2958 if (mask & (AT_ATIME | AT_MTIME)) {
2959 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2960 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2962 return (SET_ERROR(EOVERFLOW));
2965 if (xoap && (mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
2966 TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
2968 return (SET_ERROR(EOVERFLOW));
2974 /* Can this be moved to before the top label? */
2975 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
2977 return (SET_ERROR(EROFS));
2981 * First validate permissions
2984 if (mask & AT_SIZE) {
2986 * XXX - Note, we are not providing any open
2987 * mode flags here (like FNDELAY), so we may
2988 * block if there are locks present... this
2989 * should be addressed in openat().
2991 /* XXX - would it be OK to generate a log record here? */
2992 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2999 if (mask & (AT_ATIME|AT_MTIME) ||
3000 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
3001 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
3002 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
3003 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
3004 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
3005 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
3006 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
3007 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3011 if (mask & (AT_UID|AT_GID)) {
3012 int idmask = (mask & (AT_UID|AT_GID));
3017 * NOTE: even if a new mode is being set,
3018 * we may clear S_ISUID/S_ISGID bits.
3021 if (!(mask & AT_MODE))
3022 vap->va_mode = zp->z_mode;
3025 * Take ownership or chgrp to group we are a member of
3028 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
3029 take_group = (mask & AT_GID) &&
3030 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3033 * If both AT_UID and AT_GID are set then take_owner and
3034 * take_group must both be set in order to allow taking
3037 * Otherwise, send the check through secpolicy_vnode_setattr()
3041 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
3042 ((idmask == AT_UID) && take_owner) ||
3043 ((idmask == AT_GID) && take_group)) {
3044 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3045 skipaclchk, cr) == 0) {
3047 * Remove setuid/setgid for non-privileged users
3049 secpolicy_setid_clear(vap, vp, cr);
3050 trim_mask = (mask & (AT_UID|AT_GID));
3059 oldva.va_mode = zp->z_mode;
3060 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3061 if (mask & AT_XVATTR) {
3063 * Update xvattr mask to include only those attributes
3064 * that are actually changing.
3066 * The bits will be restored prior to actually setting
3067 * the attributes, so the caller thinks they were set.
3069 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3070 if (xoap->xoa_appendonly !=
3071 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3074 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3075 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
3079 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3080 if (xoap->xoa_nounlink !=
3081 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3084 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3085 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
3089 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3090 if (xoap->xoa_immutable !=
3091 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3094 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3095 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
3099 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3100 if (xoap->xoa_nodump !=
3101 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3104 XVA_CLR_REQ(xvap, XAT_NODUMP);
3105 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
3109 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3110 if (xoap->xoa_av_modified !=
3111 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3114 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3115 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
3119 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3120 if ((vp->v_type != VREG &&
3121 xoap->xoa_av_quarantined) ||
3122 xoap->xoa_av_quarantined !=
3123 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3126 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3127 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
3131 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3133 return (SET_ERROR(EPERM));
3136 if (need_policy == FALSE &&
3137 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3138 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3143 if (mask & AT_MODE) {
3144 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3145 err = secpolicy_setid_setsticky_clear(vp, vap,
3151 trim_mask |= AT_MODE;
3159 * If trim_mask is set then take ownership
3160 * has been granted, or write_acl is present and the user
3161 * has the ability to modify the mode. In that case remove
3162 * UID|GID and/or MODE from the mask so that
3163 * secpolicy_vnode_setattr() doesn't revoke it.
3167 saved_mask = vap->va_mask;
3168 vap->va_mask &= ~trim_mask;
3169 if (trim_mask & AT_MODE) {
3171 * Save the mode, as secpolicy_vnode_setattr()
3172 * will overwrite it with oldva.va_mode.
3174 saved_mode = vap->va_mode;
3177 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3178 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3185 vap->va_mask |= saved_mask;
3186 if (trim_mask & AT_MODE) {
3188 * Recover the mode after
3189 * secpolicy_vnode_setattr().
3191 vap->va_mode = saved_mode;
3197 * secpolicy_vnode_setattr() or take ownership may have changed va_mask.
3200 mask = vap->va_mask;
3202 if ((mask & (AT_UID | AT_GID))) {
3203 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3204 &xattr_obj, sizeof (xattr_obj));
3206 if (err == 0 && xattr_obj) {
3207 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
3209 err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
3211 vrele(ZTOV(attrzp));
3216 if (mask & AT_UID) {
3217 new_uid = zfs_fuid_create(zfsvfs,
3218 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3219 if (new_uid != zp->z_uid &&
3220 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3223 err = SET_ERROR(EDQUOT);
3228 if (mask & AT_GID) {
3229 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3230 cr, ZFS_GROUP, &fuidp);
3231 if (new_gid != zp->z_gid &&
3232 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3235 err = SET_ERROR(EDQUOT);
3240 tx = dmu_tx_create(zfsvfs->z_os);
3242 if (mask & AT_MODE) {
3243 uint64_t pmode = zp->z_mode;
3245 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3247 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3248 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3249 err = SET_ERROR(EPERM);
3253 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3256 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3258 * Are we upgrading ACL from old V0 format
3261 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3262 zfs_znode_acl_version(zp) ==
3263 ZFS_ACL_VERSION_INITIAL) {
3264 dmu_tx_hold_free(tx, acl_obj, 0,
3266 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3267 0, aclp->z_acl_bytes);
3269 dmu_tx_hold_write(tx, acl_obj, 0,
3272 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3273 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3274 0, aclp->z_acl_bytes);
3276 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3278 if ((mask & AT_XVATTR) &&
3279 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3280 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3282 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3286 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3289 fuid_dirtied = zfsvfs->z_fuid_dirty;
3291 zfs_fuid_txhold(zfsvfs, tx);
3293 zfs_sa_upgrade_txholds(tx, zp);
3295 err = dmu_tx_assign(tx, TXG_WAIT);
3301 * Set each attribute requested.
3302 * We group settings according to the locks they need to acquire.
3304 * Note: you cannot set ctime directly, although it will be
3305 * updated as a side-effect of calling this function.
3308 if (mask & (AT_UID|AT_GID|AT_MODE))
3309 mutex_enter(&zp->z_acl_lock);
3311 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3312 &zp->z_pflags, sizeof (zp->z_pflags));
3315 if (mask & (AT_UID|AT_GID|AT_MODE))
3316 mutex_enter(&attrzp->z_acl_lock);
3317 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3318 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3319 sizeof (attrzp->z_pflags));
3322 if (mask & (AT_UID|AT_GID)) {
3324 if (mask & AT_UID) {
3325 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3326 &new_uid, sizeof (new_uid));
3327 zp->z_uid = new_uid;
3329 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3330 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3332 attrzp->z_uid = new_uid;
3336 if (mask & AT_GID) {
3337 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3338 NULL, &new_gid, sizeof (new_gid));
3339 zp->z_gid = new_gid;
3341 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3342 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3344 attrzp->z_gid = new_gid;
3347 if (!(mask & AT_MODE)) {
3348 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3349 NULL, &new_mode, sizeof (new_mode));
3350 new_mode = zp->z_mode;
3352 err = zfs_acl_chown_setattr(zp);
3355 err = zfs_acl_chown_setattr(attrzp);
3360 if (mask & AT_MODE) {
3361 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3362 &new_mode, sizeof (new_mode));
3363 zp->z_mode = new_mode;
3364 ASSERT3U((uintptr_t)aclp, !=, 0);
3365 err = zfs_aclset_common(zp, aclp, cr, tx);
3367 if (zp->z_acl_cached)
3368 zfs_acl_free(zp->z_acl_cached);
3369 zp->z_acl_cached = aclp;
3374 if (mask & AT_ATIME) {
3375 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3376 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3377 &zp->z_atime, sizeof (zp->z_atime));
3380 if (mask & AT_MTIME) {
3381 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3382 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3383 mtime, sizeof (mtime));
3386 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3387 if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3388 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3389 NULL, mtime, sizeof (mtime));
3390 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3391 &ctime, sizeof (ctime));
3392 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3394 } else if (mask != 0) {
3395 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3396 &ctime, sizeof (ctime));
3397 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3400 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3401 SA_ZPL_CTIME(zfsvfs), NULL,
3402 &ctime, sizeof (ctime));
3403 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3404 mtime, ctime, B_TRUE);
3408 * Do this after setting the timestamps to prevent the timestamp
3409 * update from toggling the archive bit.
3412 if (xoap && (mask & AT_XVATTR)) {
3414 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
3415 xoap->xoa_createtime = vap->va_birthtime;
3417 * Restore the trimmed-off masks
3418 * so that return masks can be set for the caller.
3421 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3422 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3424 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3425 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3427 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3428 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3430 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3431 XVA_SET_REQ(xvap, XAT_NODUMP);
3433 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3434 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3436 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3437 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3440 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3441 ASSERT(vp->v_type == VREG);
3443 zfs_xvattr_set(zp, xvap, tx);
3447 zfs_fuid_sync(zfsvfs, tx);
3450 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3452 if (mask & (AT_UID|AT_GID|AT_MODE))
3453 mutex_exit(&zp->z_acl_lock);
3456 if (mask & (AT_UID|AT_GID|AT_MODE))
3457 mutex_exit(&attrzp->z_acl_lock);
3460 if (err == 0 && attrzp) {
3461 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3473 zfs_fuid_info_free(fuidp);
3480 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3485 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3486 zil_commit(zilog, 0);
3493 * We acquire all but fdvp locks using non-blocking acquisitions. If we
3494 * fail to acquire any lock in the path we will drop all held locks,
3495 * acquire the new lock in a blocking fashion, and then release it and
3496 * restart the rename. This acquire/release step ensures that we do not
3497 * spin on a lock waiting for release. On error release all vnode locks
3498 * and decrement references the way tmpfs_rename() would do.
3501 zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
3502 struct vnode *tdvp, struct vnode **tvpp,
3503 const struct componentname *scnp, const struct componentname *tcnp)
3506 struct vnode *nvp, *svp, *tvp;
3507 znode_t *sdzp, *tdzp, *szp, *tzp;
3508 const char *snm = scnp->cn_nameptr;
3509 const char *tnm = tcnp->cn_nameptr;
3512 VOP_UNLOCK(tdvp, 0);
3513 if (*tvpp != NULL && *tvpp != tdvp)
3514 VOP_UNLOCK(*tvpp, 0);
3517 error = vn_lock(sdvp, LK_EXCLUSIVE);
3522 error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
3524 VOP_UNLOCK(sdvp, 0);
3527 error = vn_lock(tdvp, LK_EXCLUSIVE);
3530 VOP_UNLOCK(tdvp, 0);
3536 * Before using sdzp and tdzp we must ensure that they are live.
3537 * As a porting legacy from illumos we have two things to worry
3538 * about. One, typical for FreeBSD, is that the vnode is
3539 * not reclaimed (doomed). The other is that the znode is live.
3540 * The current code can invalidate the znode without acquiring the
3541 * corresponding vnode lock if the object represented by the znode
3542 * and vnode is no longer valid after a rollback or receive operation.
3543 * z_teardown_lock hidden behind ZFS_ENTER and ZFS_EXIT is the lock
3544 * that protects the znodes from the invalidation.
3546 zfsvfs = sdzp->z_zfsvfs;
3547 ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
3551 * We can not use ZFS_VERIFY_ZP() here because it could directly return
3552 * bypassing the cleanup code in the case of an error.
3554 if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
3556 VOP_UNLOCK(sdvp, 0);
3557 VOP_UNLOCK(tdvp, 0);
3558 error = SET_ERROR(EIO);
3563 * Re-resolve svp to be certain it still exists and fetch the znode.
3566 error = zfs_dirent_lookup(sdzp, snm, &szp, ZEXISTS);
3568 /* Source entry invalid or not there. */
3570 VOP_UNLOCK(sdvp, 0);
3571 VOP_UNLOCK(tdvp, 0);
3572 if ((scnp->cn_flags & ISDOTDOT) != 0 ||
3573 (scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
3574 error = SET_ERROR(EINVAL);
3580 * Re-resolve tvp; if it disappeared, we just carry on.
3582 error = zfs_dirent_lookup(tdzp, tnm, &tzp, 0);
3585 VOP_UNLOCK(sdvp, 0);
3586 VOP_UNLOCK(tdvp, 0);
3588 if ((tcnp->cn_flags & ISDOTDOT) != 0)
3589 error = SET_ERROR(EINVAL);
3598 * At present the vnode locks must be acquired before z_teardown_lock,
3599 * although it would be more logical to use the opposite order.
3604 * Now try to acquire locks on svp and tvp.
3607 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
3609 VOP_UNLOCK(sdvp, 0);
3610 VOP_UNLOCK(tdvp, 0);
3613 if (error != EBUSY) {
3617 error = vn_lock(nvp, LK_EXCLUSIVE);
3624 * Concurrent rename race.
3629 error = SET_ERROR(EINVAL);
3644 error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
3646 VOP_UNLOCK(sdvp, 0);
3647 VOP_UNLOCK(tdvp, 0);
3648 VOP_UNLOCK(*svpp, 0);
3649 if (error != EBUSY) {
3653 error = vn_lock(nvp, LK_EXCLUSIVE);
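/*
 * Illustrative sketch of the deadlock-avoidance pattern used above
 * (hypothetical vnodes a and b): take the second lock non-blocking;
 * on failure drop everything, take it blocking just to wait out the
 * current holder, then release it and restart, so no lock is ever
 * held while sleeping on another:
 *
 * relock:
 *	vn_lock(a, LK_EXCLUSIVE);
 *	if (vn_lock(b, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
 *		VOP_UNLOCK(a, 0);
 *		vn_lock(b, LK_EXCLUSIVE);	// wait for the holder
 *		VOP_UNLOCK(b, 0);		// don't keep it out of order
 *		goto relock;
 *	}
 */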
3671 * Note that we must use VRELE_ASYNC in this function as it walks
3672 * up the directory tree and vrele may need to acquire an exclusive
3673 * lock if a last reference to a vnode is dropped.
3676 zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
3683 zfsvfs = tdzp->z_zfsvfs;
3685 return (SET_ERROR(EINVAL));
3688 if (tdzp->z_id == zfsvfs->z_root)
3692 ASSERT(!zp->z_unlinked);
3693 if ((error = sa_lookup(zp->z_sa_hdl,
3694 SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
3697 if (parent == szp->z_id) {
3698 error = SET_ERROR(EINVAL);
3701 if (parent == zfsvfs->z_root)
3703 if (parent == sdzp->z_id)
3706 error = zfs_zget(zfsvfs, parent, &zp1);
3711 VN_RELE_ASYNC(ZTOV(zp),
3712 dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
3716 if (error == ENOTDIR)
3717 panic("checkpath: .. not a directory\n");
3719 VN_RELE_ASYNC(ZTOV(zp),
3720 dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
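/*
 * Worked example of the check above: renaming /usr/a/b into
 * /usr/a/b/c/d starts at tdzp (/usr/a/b/c) and walks SA_ZPL_PARENT
 * upwards; the first parent encountered is b itself (szp), so EINVAL
 * is returned before a directory could become its own descendant.
 */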
3725 * Move an entry from the provided source directory to the target
3726 * directory. Change the entry name as indicated.
3728 * IN: sdvp - Source directory containing the "old entry".
3729 * snm - Old entry name.
3730 * tdvp - Target directory to contain the "new entry".
3731 * tnm - New entry name.
3732 * cr - credentials of caller.
3733 * ct - caller context
3734 * flags - case flags
3736 * RETURN: 0 on success, error code on failure.
3739 * sdvp,tdvp - ctime|mtime updated
3743 zfs_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
3744 vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
3748 znode_t *sdzp, *tdzp, *szp, *tzp;
3749 zilog_t *zilog = NULL;
3751 char *snm = scnp->cn_nameptr;
3752 char *tnm = tcnp->cn_nameptr;
3755 /* Reject renames across filesystems. */
3756 if ((*svpp)->v_mount != tdvp->v_mount ||
3757 ((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
3758 error = SET_ERROR(EXDEV);
3762 if (zfsctl_is_node(tdvp)) {
3763 error = SET_ERROR(EXDEV);
3768 * Lock all four vnodes to ensure safety and semantics of renaming.
3770 error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
3772 /* no vnodes are locked in the case of error here */
3778 zfsvfs = tdzp->z_zfsvfs;
3779 zilog = zfsvfs->z_log;
3782 * After we re-enter ZFS_ENTER() we will have to revalidate all
3787 if (zfsvfs->z_utf8 && u8_validate(tnm,
3788 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3789 error = SET_ERROR(EILSEQ);
3793 /* If source and target are the same file, there is nothing to do. */
3794 if ((*svpp) == (*tvpp)) {
3799 if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
3800 ((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
3801 (*tvpp)->v_mountedhere != NULL)) {
3802 error = SET_ERROR(EXDEV);
3807 * We can not use ZFS_VERIFY_ZP() here because it could directly return
3808 * bypassing the cleanup code in the case of an error.
3810 if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
3811 error = SET_ERROR(EIO);
3816 tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
3817 if (szp->z_sa_hdl == NULL || (tzp != NULL && tzp->z_sa_hdl == NULL)) {
3818 error = SET_ERROR(EIO);
3823 * This is to prevent the creation of links into attribute space
3824 * by renaming a linked file into/out of an attribute directory.
3825 * See the comment in zfs_link() for why this is considered bad.
3827 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3828 error = SET_ERROR(EINVAL);
3833 * Must have write access at the source to remove the old entry
3834 * and write access at the target to create the new entry.
3835 * Note that if target and source are the same, this can be
3836 * done in a single check.
3838 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3841 if ((*svpp)->v_type == VDIR) {
3843 * Avoid ".", "..", and aliases of "." for obvious reasons.
3845 if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
3847 (scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
3853 * Check to make sure rename is valid.
3854 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3856 if (error = zfs_rename_check(szp, sdzp, tdzp))
3861 * Does target exist?
3865 * Source and target must be the same type.
3867 if ((*svpp)->v_type == VDIR) {
3868 if ((*tvpp)->v_type != VDIR) {
3869 error = SET_ERROR(ENOTDIR);
3877 if ((*tvpp)->v_type == VDIR) {
3878 error = SET_ERROR(EISDIR);
3884 vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
3886 vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
3889 * Notify the target directory if it is not the same
3890 * as the source directory.
3893 vnevent_rename_dest_dir(tdvp, ct);
3896 tx = dmu_tx_create(zfsvfs->z_os);
3897 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3898 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3899 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3900 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3902 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3903 zfs_sa_upgrade_txholds(tx, tdzp);
3906 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3907 zfs_sa_upgrade_txholds(tx, tzp);
3910 zfs_sa_upgrade_txholds(tx, szp);
3911 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3912 error = dmu_tx_assign(tx, TXG_WAIT);
3919 if (tzp) /* Attempt to remove the existing target */
3920 error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
3923 error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
3925 szp->z_pflags |= ZFS_AV_MODIFIED;
3927 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3928 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3931 error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
3934 zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
3935 snm, tdzp, tnm, szp);
3938 * Update path information for the target vnode
3940 vn_renamepath(tdvp, *svpp, tnm, strlen(tnm));
3943 * At this point, we have successfully created
3944 * the target name, but have failed to remove
3945 * the source name. Since the create was done
3946 * with the ZRENAMING flag, there are
3947 * complications; for one, the link count is
3948 * wrong. The easiest way to deal with this
3949 * is to remove the newly created target, and
3950 * return the original error. This must
3951 * succeed; fortunately, it is very unlikely to
3952 * fail, since we just created it.
3954 VERIFY3U(zfs_link_destroy(tdzp, tnm, szp, tx,
3955 ZRENAMING, NULL), ==, 0);
3962 cache_purge_negative(tdvp);
3968 unlockout: /* all 4 vnodes are locked, ZFS_ENTER called */
3970 VOP_UNLOCK(*svpp, 0);
3971 VOP_UNLOCK(sdvp, 0);
3973 out: /* original two vnodes are locked */
3974 if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3975 zil_commit(zilog, 0);
3978 VOP_UNLOCK(*tvpp, 0);
3980 VOP_UNLOCK(tdvp, 0);
3985 * Insert the indicated symbolic reference entry into the directory.
3987 * IN: dvp - Directory to contain new symbolic link.
3988 * name - Name for new symlink entry.
 * link - Target path the symlink will point to.
3989 * vap - Attributes of new entry.
3990 * cr - credentials of caller.
3991 * ct - caller context
3992 * flags - case flags
3994 * RETURN: 0 on success, error code on failure.
3997 * dvp - ctime|mtime updated
4001 zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
4002 cred_t *cr, kthread_t *td)
4004 znode_t *zp, *dzp = VTOZ(dvp);
4006 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4008 uint64_t len = strlen(link);
4010 zfs_acl_ids_t acl_ids;
4011 boolean_t fuid_dirtied;
4012 uint64_t txtype = TX_SYMLINK;
4015 ASSERT(vap->va_type == VLNK);
4019 zilog = zfsvfs->z_log;
4021 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4022 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4024 return (SET_ERROR(EILSEQ));
4027 if (len > MAXPATHLEN) {
4029 return (SET_ERROR(ENAMETOOLONG));
4032 if ((error = zfs_acl_ids_create(dzp, 0,
4033 vap, cr, NULL, &acl_ids)) != 0) {
4039 * Attempt to lock directory; fail if entry already exists.
4041 error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
4043 zfs_acl_ids_free(&acl_ids);
4048 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4049 zfs_acl_ids_free(&acl_ids);
4054 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
4055 zfs_acl_ids_free(&acl_ids);
4057 return (SET_ERROR(EDQUOT));
4060 getnewvnode_reserve(1);
4061 tx = dmu_tx_create(zfsvfs->z_os);
4062 fuid_dirtied = zfsvfs->z_fuid_dirty;
4063 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4064 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4065 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4066 ZFS_SA_BASE_ATTR_SIZE + len);
4067 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4068 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4069 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4070 acl_ids.z_aclp->z_acl_bytes);
4073 zfs_fuid_txhold(zfsvfs, tx);
4074 error = dmu_tx_assign(tx, TXG_WAIT);
4076 zfs_acl_ids_free(&acl_ids);
4078 getnewvnode_drop_reserve();
4084 * Create a new object for the symlink.
4085 * For version 4 ZPL datasets the symlink will be an SA attribute.
4087 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4090 zfs_fuid_sync(zfsvfs, tx);
4093 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4096 zfs_sa_symlink(zp, link, len, tx);
4099 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4100 &zp->z_size, sizeof (zp->z_size), tx);
4102 * Insert the new object into the directory.
4104 (void) zfs_link_create(dzp, name, zp, tx, ZNEW);
4106 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4109 zfs_acl_ids_free(&acl_ids);
4113 getnewvnode_drop_reserve();
4115 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4116 zil_commit(zilog, 0);
4123 * Return, in the buffer contained in the provided uio structure,
4124 * the symbolic path referred to by vp.
4126 * IN: vp - vnode of symbolic link.
4127 * uio - structure to contain the link path.
4128 * cr - credentials of caller.
4129 * ct - caller context
4131 * OUT: uio - structure containing the link path.
4133 * RETURN: 0 on success, error code on failure.
4136 * vp - atime updated
4140 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
4142 znode_t *zp = VTOZ(vp);
4143 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4150 error = sa_lookup_uio(zp->z_sa_hdl,
4151 SA_ZPL_SYMLINK(zfsvfs), uio);
4153 error = zfs_sa_readlink(zp, uio);
4155 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4162 * Insert a new entry into directory tdvp referencing svp.
4164 * IN: tdvp - Directory to contain new entry.
4165 * svp - vnode of new entry.
4166 * name - name of new entry.
4167 * cr - credentials of caller.
4168 * ct - caller context
4170 * RETURN: 0 on success, error code on failure.
4173 * tdvp - ctime|mtime updated
4174 * svp - ctime updated
4178 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4179 caller_context_t *ct, int flags)
4181 znode_t *dzp = VTOZ(tdvp);
4183 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4190 ASSERT(tdvp->v_type == VDIR);
4194 zilog = zfsvfs->z_log;
4197 * POSIX dictates that we return EPERM here.
4198 * Better choices include ENOTSUP or EISDIR.
4200 if (svp->v_type == VDIR) {
4202 return (SET_ERROR(EPERM));
4208 if (szp->z_pflags & (ZFS_APPENDONLY | ZFS_IMMUTABLE | ZFS_READONLY)) {
4210 return (SET_ERROR(EPERM));
4213 /* Prevent links to .zfs/shares files */
4215 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4216 &parent, sizeof (uint64_t))) != 0) {
4220 if (parent == zfsvfs->z_shares_dir) {
4222 return (SET_ERROR(EPERM));
4225 if (zfsvfs->z_utf8 && u8_validate(name,
4226 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4228 return (SET_ERROR(EILSEQ));
4232 * We do not support links between attributes and non-attributes
4233 * because of the potential security risk of creating links
4234 * into "normal" file space in order to circumvent restrictions
4235 * imposed in attribute space.
4237 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4239 return (SET_ERROR(EINVAL));
4243 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4244 if (owner != crgetuid(cr) && secpolicy_basic_link(svp, cr) != 0) {
4246 return (SET_ERROR(EPERM));
4249 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4255 * Attempt to lock directory; fail if entry already exists.
4257 error = zfs_dirent_lookup(dzp, name, &tzp, ZNEW);
4263 tx = dmu_tx_create(zfsvfs->z_os);
4264 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4265 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4266 zfs_sa_upgrade_txholds(tx, szp);
4267 zfs_sa_upgrade_txholds(tx, dzp);
4268 error = dmu_tx_assign(tx, TXG_WAIT);
4275 error = zfs_link_create(dzp, name, szp, tx, 0);
4278 uint64_t txtype = TX_LINK;
4279 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4285 vnevent_link(svp, ct);
4288 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4289 zil_commit(zilog, 0);
4298 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4300 znode_t *zp = VTOZ(vp);
4301 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4304 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4305 if (zp->z_sa_hdl == NULL) {
4307 * The fs has been unmounted, or we did a
4308 * suspend/resume and this file no longer exists.
4310 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4315 if (zp->z_unlinked) {
4317 * Fast path to recycle a vnode of a removed file.
4319 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4324 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4325 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4327 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4328 zfs_sa_upgrade_txholds(tx, zp);
4329 error = dmu_tx_assign(tx, TXG_WAIT);
4333 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4334 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4335 zp->z_atime_dirty = 0;
4339 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4343 CTASSERT(sizeof(struct zfid_short) <= sizeof(struct fid));
4344 CTASSERT(sizeof(struct zfid_long) <= sizeof(struct fid));
4348 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
4350 znode_t *zp = VTOZ(vp);
4351 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4354 uint64_t object = zp->z_id;
4361 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
4362 &gen64, sizeof (uint64_t))) != 0) {
4367 gen = (uint32_t)gen64;
4369 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
4372 if (fidp->fid_len < size) {
4373 fidp->fid_len = size;
4375 return (SET_ERROR(ENOSPC));
4378 fidp->fid_len = size;
4381 zfid = (zfid_short_t *)fidp;
4383 zfid->zf_len = size;
4385 for (i = 0; i < sizeof (zfid->zf_object); i++)
4386 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4388 /* Must have a non-zero generation number to distinguish from .zfs */
4391 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4392 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4394 if (size == LONG_FID_LEN) {
4395 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
4398 zlfid = (zfid_long_t *)fidp;
4400 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4401 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4403 /* XXX - this should be the generation number for the objset */
4404 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4405 zlfid->zf_setgen[i] = 0;
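/*
 * Layout sketch of the result (fields packed least-significant byte
 * first, as in the loops above):
 *
 *	short fid:  zf_len | zf_object[] | zf_gen[]
 *	long fid:   short fid + zf_setid[] | zf_setgen[]
 *
 * The long form is produced only when zfsvfs->z_parent != zfsvfs,
 * i.e. for snapshots, where the objset id is needed to locate the
 * right snapshot dataset.
 */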
4413 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4414 caller_context_t *ct)
4425 case _PC_FILESIZEBITS:
4429 case _PC_XATTR_EXISTS:
4431 zfsvfs = zp->z_zfsvfs;
4435 error = zfs_dirent_lookup(zp, "", &xzp,
4436 ZXATTR | ZEXISTS | ZSHARED);
4438 if (!zfs_dirempty(xzp))
4441 } else if (error == ENOENT) {
4443 * If there aren't extended attributes, it's the
4444 * same as having zero of them.
4451 case _PC_SATTR_ENABLED:
4452 case _PC_SATTR_EXISTS:
4453 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
4454 (vp->v_type == VREG || vp->v_type == VDIR);
4457 case _PC_ACCESS_FILTERING:
4458 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
4462 case _PC_ACL_ENABLED:
4463 *valp = _ACL_ACE_ENABLED;
4465 #endif /* illumos */
4466 case _PC_MIN_HOLE_SIZE:
4467 *valp = (int)SPA_MINBLOCKSIZE;
4470 case _PC_TIMESTAMP_RESOLUTION:
4471 /* nanosecond timestamp resolution */
4475 case _PC_ACL_EXTENDED:
4483 case _PC_ACL_PATH_MAX:
4484 *valp = ACL_MAX_ENTRIES;
4488 return (EOPNOTSUPP);
4494 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4495 caller_context_t *ct)
4497 znode_t *zp = VTOZ(vp);
4498 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4500 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4504 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4512 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
4513 caller_context_t *ct)
4515 znode_t *zp = VTOZ(vp);
4516 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4518 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4519 zilog_t *zilog = zfsvfs->z_log;
4524 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4526 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4527 zil_commit(zilog, 0);
4534 ioflags(int ioflags)
4538 if (ioflags & IO_APPEND)
4540 if (ioflags & IO_NDELAY)
4542 if (ioflags & IO_SYNC)
4543 flags |= (FSYNC | FDSYNC | FRSYNC);
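/*
 * Usage sketch: the translation above lets the FreeBSD VOPs hand
 * illumos-style flags down to the common code, as in
 * zfs_freebsd_read() below:
 *
 *	zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
 *	    ap->a_cred, NULL);
 */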
4549 zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int *rbehind,
4552 znode_t *zp = VTOZ(vp);
4553 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4554 objset_t *os = zp->z_zfsvfs->z_os;
4559 off_t startoff, endoff;
4561 vm_pindex_t reqstart, reqend;
4564 object = m[0]->object;
4570 zfs_vmobject_wlock(object);
4571 if (m[count - 1]->valid != 0 && --count == 0) {
4572 zfs_vmobject_wunlock(object);
4576 mlast = m[count - 1];
4578 if (IDX_TO_OFF(mlast->pindex) >=
4579 object->un_pager.vnp.vnp_size) {
4580 zfs_vmobject_wunlock(object);
4582 return (zfs_vm_pagerret_bad);
4585 VM_CNT_INC(v_vnodein);
4586 VM_CNT_ADD(v_vnodepgsin, count);
4589 if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
4590 lsize = object->un_pager.vnp.vnp_size -
4591 IDX_TO_OFF(mlast->pindex);
4592 zfs_vmobject_wunlock(object);
4594 for (i = 0; i < count; i++) {
4598 va = zfs_map_page(m[i], &sf);
4599 error = dmu_read(os, zp->z_id, IDX_TO_OFF(m[i]->pindex),
4600 size, va, DMU_READ_PREFETCH);
4601 if (size != PAGE_SIZE)
4602 bzero(va + size, PAGE_SIZE - size);
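/*
 * Zero the tail of the last page that extends past EOF so
 * no stale data becomes visible to user mappings.
 */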
4608 zfs_vmobject_wlock(object);
4609 for (i = 0; i < count; i++)
4610 m[i]->valid = VM_PAGE_BITS_ALL;
4611 zfs_vmobject_wunlock(object);
4614 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4621 return (zfs_vm_pagerret_ok);
4623 return (zfs_vm_pagerret_error);
4627 zfs_freebsd_getpages(ap)
4628 struct vop_getpages_args /* {
4637 return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
4642 zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
4645 znode_t *zp = VTOZ(vp);
4646 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4655 vm_ooffset_t lo_off;
4666 object = vp->v_object;
4670 KASSERT(ma[0]->object == object, ("mismatching object"));
4671 KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
4673 for (i = 0; i < pcount; i++)
4674 rtvals[i] = zfs_vm_pagerret_error;
4676 off = IDX_TO_OFF(ma[0]->pindex);
4677 blksz = zp->z_blksz;
4678 lo_off = rounddown(off, blksz);
4679 lo_len = roundup(len + (off - lo_off), blksz);
4680 rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
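/*
 * Worked example of the rounding above (hypothetical numbers): with
 * blksz = 8192, off = 12288 and len = 4096, lo_off =
 * rounddown(12288, 8192) = 8192 and lo_len = roundup(4096 + 4096,
 * 8192) = 8192, so the whole block [8192, 16384) is locked and a
 * concurrent writer cannot touch a partially covered block.
 */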
4682 zfs_vmobject_wlock(object);
4683 if (len + off > object->un_pager.vnp.vnp_size) {
4684 if (object->un_pager.vnp.vnp_size > off) {
4687 len = object->un_pager.vnp.vnp_size - off;
4689 if ((pgoff = (int)len & PAGE_MASK) != 0) {
4691 * If the object is locked and the following
4692 * conditions hold, then the page's dirty
4693 * field cannot be concurrently changed by a pmap operation.
4697 vm_page_assert_sbusied(m);
4698 KASSERT(!pmap_page_is_write_mapped(m),
4699 ("zfs_putpages: page %p is not read-only", m));
4700 vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
4707 if (ncount < pcount) {
4708 for (i = ncount; i < pcount; i++) {
4709 rtvals[i] = zfs_vm_pagerret_bad;
4713 zfs_vmobject_wunlock(object);
4718 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4719 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4723 tx = dmu_tx_create(zfsvfs->z_os);
4724 dmu_tx_hold_write(tx, zp->z_id, off, len);
4726 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4727 zfs_sa_upgrade_txholds(tx, zp);
4728 err = dmu_tx_assign(tx, TXG_WAIT);
4734 if (zp->z_blksz < PAGE_SIZE) {
4736 for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
4737 tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
4738 va = zfs_map_page(ma[i], &sf);
4739 dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
4743 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
4747 uint64_t mtime[2], ctime[2];
4748 sa_bulk_attr_t bulk[3];
4751 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4753 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4755 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4757 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4759 err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
4761 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4763 zfs_vmobject_wlock(object);
4764 for (i = 0; i < ncount; i++) {
4765 rtvals[i] = zfs_vm_pagerret_ok;
4766 vm_page_undirty(ma[i]);
4768 zfs_vmobject_wunlock(object);
4769 VM_CNT_INC(v_vnodeout);
4770 VM_CNT_ADD(v_vnodepgsout, ncount);
4775 zfs_range_unlock(rl);
4776 if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
4777 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4778 zil_commit(zfsvfs->z_log, zp->z_id);
4784 zfs_freebsd_putpages(ap)
4785 struct vop_putpages_args /* {
4794 return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
4799 zfs_freebsd_bmap(ap)
4800 struct vop_bmap_args /* {
4803 struct bufobj **a_bop;
4810 if (ap->a_bop != NULL)
4811 *ap->a_bop = &ap->a_vp->v_bufobj;
4812 if (ap->a_bnp != NULL)
4813 *ap->a_bnp = ap->a_bn;
4814 if (ap->a_runp != NULL)
4816 if (ap->a_runb != NULL)
4823 zfs_freebsd_open(ap)
4824 struct vop_open_args /* {
4827 struct ucred *a_cred;
4828 struct thread *a_td;
4831 vnode_t *vp = ap->a_vp;
4832 znode_t *zp = VTOZ(vp);
4835 error = zfs_open(&vp, ap->a_mode, ap->a_cred, NULL);
4837 vnode_create_vobject(vp, zp->z_size, ap->a_td);
4842 zfs_freebsd_close(ap)
4843 struct vop_close_args /* {
4846 struct ucred *a_cred;
4847 struct thread *a_td;
4851 return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred, NULL));
4855 zfs_freebsd_ioctl(ap)
4856 struct vop_ioctl_args /* {
4866 return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
4867 ap->a_fflag, ap->a_cred, NULL, NULL));
4871 zfs_freebsd_read(ap)
4872 struct vop_read_args /* {
4876 struct ucred *a_cred;
4880 return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
4885 zfs_freebsd_write(ap)
4886 struct vop_write_args /* {
4890 struct ucred *a_cred;
4894 return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
4899 zfs_freebsd_access(ap)
4900 struct vop_access_args /* {
4902 accmode_t a_accmode;
4903 struct ucred *a_cred;
4904 struct thread *a_td;
4907 vnode_t *vp = ap->a_vp;
4908 znode_t *zp = VTOZ(vp);
4913 * ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
4915 accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
4917 error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);
4920 * VADMIN has to be handled by vaccess().
4923 accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
4925 error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
4926 zp->z_gid, accmode, ap->a_cred, NULL);
4931 * For VEXEC, ensure that at least one execute bit is set for non-directories.
4934 if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
4935 (zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
4943 zfs_freebsd_lookup(ap)
4944 struct vop_lookup_args /* {
4945 struct vnode *a_dvp;
4946 struct vnode **a_vpp;
4947 struct componentname *a_cnp;
4950 struct componentname *cnp = ap->a_cnp;
4951 char nm[NAME_MAX + 1];
4953 ASSERT(cnp->cn_namelen < sizeof(nm));
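/*
 * cn_nameptr is not NUL-terminated at cn_namelen, so take a
 * bounded copy before handing the name to zfs_lookup().
 */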
4954 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof(nm)));
4956 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
4957 cnp->cn_cred, cnp->cn_thread, 0));
4961 zfs_cache_lookup(ap)
4962 struct vop_lookup_args /* {
4963 struct vnode *a_dvp;
4964 struct vnode **a_vpp;
4965 struct componentname *a_cnp;
4970 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4971 if (zfsvfs->z_use_namecache)
4972 return (vfs_cache_lookup(ap));
4974 return (zfs_freebsd_lookup(ap));
4978 zfs_freebsd_create(ap)
4979 struct vop_create_args /* {
4980 struct vnode *a_dvp;
4981 struct vnode **a_vpp;
4982 struct componentname *a_cnp;
4983 struct vattr *a_vap;
4987 struct componentname *cnp = ap->a_cnp;
4988 vattr_t *vap = ap->a_vap;
4991 ASSERT(cnp->cn_flags & SAVENAME);
4993 vattr_init_mask(vap);
4994 mode = vap->va_mode & ALLPERMS;
4995 zfsvfs = ap->a_dvp->v_mount->mnt_data;
4997 error = zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
4998 ap->a_vpp, cnp->cn_cred, cnp->cn_thread);
4999 if (zfsvfs->z_use_namecache &&
5000 error == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
5001 cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
5006 zfs_freebsd_remove(ap)
5007 struct vop_remove_args /* {
5008 struct vnode *a_dvp;
5010 struct componentname *a_cnp;
5014 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
5016 return (zfs_remove(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
5017 ap->a_cnp->cn_cred));
5021 zfs_freebsd_mkdir(ap)
5022 struct vop_mkdir_args /* {
5023 struct vnode *a_dvp;
5024 struct vnode **a_vpp;
5025 struct componentname *a_cnp;
5026 struct vattr *a_vap;
5029 vattr_t *vap = ap->a_vap;
5031 ASSERT(ap->a_cnp->cn_flags & SAVENAME);
5033 vattr_init_mask(vap);
5035 return (zfs_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, vap, ap->a_vpp,
5036 ap->a_cnp->cn_cred));
5040 zfs_freebsd_rmdir(ap)
5041 struct vop_rmdir_args /* {
5042 struct vnode *a_dvp;
5044 struct componentname *a_cnp;
5047 struct componentname *cnp = ap->a_cnp;
5049 ASSERT(cnp->cn_flags & SAVENAME);
5051 return (zfs_rmdir(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
5055 zfs_freebsd_readdir(ap)
5056 struct vop_readdir_args /* {
5059 struct ucred *a_cred;
5066 return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag,
5067 ap->a_ncookies, ap->a_cookies));
5071 zfs_freebsd_fsync(ap)
5072 struct vop_fsync_args /* {
5075 struct thread *a_td;
5080 return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
5084 zfs_freebsd_getattr(ap)
5085 struct vop_getattr_args /* {
5087 struct vattr *a_vap;
5088 struct ucred *a_cred;
5091 vattr_t *vap = ap->a_vap;
5097 xvap.xva_vattr = *vap;
5098 xvap.xva_vattr.va_mask |= AT_XVATTR;
5100 /* Convert chflags into ZFS-type flags. */
5101 /* XXX: what about SF_SETTABLE? */
5102 XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
5103 XVA_SET_REQ(&xvap, XAT_APPENDONLY);
5104 XVA_SET_REQ(&xvap, XAT_NOUNLINK);
5105 XVA_SET_REQ(&xvap, XAT_NODUMP);
5106 XVA_SET_REQ(&xvap, XAT_READONLY);
5107 XVA_SET_REQ(&xvap, XAT_ARCHIVE);
5108 XVA_SET_REQ(&xvap, XAT_SYSTEM);
5109 XVA_SET_REQ(&xvap, XAT_HIDDEN);
5110 XVA_SET_REQ(&xvap, XAT_REPARSE);
5111 XVA_SET_REQ(&xvap, XAT_OFFLINE);
5112 XVA_SET_REQ(&xvap, XAT_SPARSE);
5114 error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred, NULL);
5118 /* Convert the ZFS optional attributes back into chflags. */
5119 #define FLAG_CHECK(fflag, xflag, xfield) do { \
5120 if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
5121 fflags |= (fflag); \
5123 FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
5124 xvap.xva_xoptattrs.xoa_immutable);
5125 FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
5126 xvap.xva_xoptattrs.xoa_appendonly);
5127 FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
5128 xvap.xva_xoptattrs.xoa_nounlink);
5129 FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
5130 xvap.xva_xoptattrs.xoa_archive);
5131 FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
5132 xvap.xva_xoptattrs.xoa_nodump);
5133 FLAG_CHECK(UF_READONLY, XAT_READONLY,
5134 xvap.xva_xoptattrs.xoa_readonly);
5135 FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
5136 xvap.xva_xoptattrs.xoa_system);
5137 FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
5138 xvap.xva_xoptattrs.xoa_hidden);
5139 FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
5140 xvap.xva_xoptattrs.xoa_reparse);
5141 FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
5142 xvap.xva_xoptattrs.xoa_offline);
5143 FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
5144 xvap.xva_xoptattrs.xoa_sparse);
5147 *vap = xvap.xva_vattr;
5148 vap->va_flags = fflags;
5153 zfs_freebsd_setattr(ap)
5154 struct vop_setattr_args /* {
5156 struct vattr *a_vap;
5157 struct ucred *a_cred;
5160 vnode_t *vp = ap->a_vp;
5161 vattr_t *vap = ap->a_vap;
5162 cred_t *cred = ap->a_cred;
5167 vattr_init_mask(vap);
5168 vap->va_mask &= ~AT_NOSET;
5171 xvap.xva_vattr = *vap;
5173 zflags = VTOZ(vp)->z_pflags;
5175 if (vap->va_flags != VNOVAL) {
5176 zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
5179 if (zfsvfs->z_use_fuids == B_FALSE)
5180 return (EOPNOTSUPP);
5182 fflags = vap->va_flags;
5185 * We need to figure out whether it makes sense to allow
5186 * UF_REPARSE through, since we don't really have other
5187 * facilities to handle reparse points and zfs_setattr()
5188 * doesn't currently allow setting that attribute anyway.
5190 if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
5191 UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
5192 UF_OFFLINE|UF_SPARSE)) != 0)
5193 return (EOPNOTSUPP);
5195 * Unprivileged processes are not permitted to unset system
5196 * flags, or modify flags if any system flags are set.
5197 * Privileged non-jail processes may not modify system flags
5198 * if securelevel > 0 and any existing system flags are set.
5199 * Privileged jail processes behave like privileged non-jail
5200 * processes if the security.jail.chflags_allowed sysctl is
5201 * non-zero; otherwise, they behave like unprivileged
5204 if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
5205 priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0) == 0) {
5207 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
5208 error = securelevel_gt(cred, 0);
5214 * Callers may only modify the file flags on objects they
5215 * have VADMIN rights for.
5217 if ((error = VOP_ACCESS(vp, VADMIN, cred, curthread)) != 0)
5220 (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
5224 (SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
5229 #define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
5230 if (((fflags & (fflag)) && !(zflags & (zflag))) || \
5231 ((zflags & (zflag)) && !(fflags & (fflag)))) { \
5232 XVA_SET_REQ(&xvap, (xflag)); \
5233 (xfield) = ((fflags & (fflag)) != 0); \
5236 /* Convert chflags into ZFS-type flags. */
5237 /* XXX: what about SF_SETTABLE? */
5238 FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
5239 xvap.xva_xoptattrs.xoa_immutable);
5240 FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
5241 xvap.xva_xoptattrs.xoa_appendonly);
5242 FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
5243 xvap.xva_xoptattrs.xoa_nounlink);
5244 FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
5245 xvap.xva_xoptattrs.xoa_archive);
5246 FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
5247 xvap.xva_xoptattrs.xoa_nodump);
5248 FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
5249 xvap.xva_xoptattrs.xoa_readonly);
5250 FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
5251 xvap.xva_xoptattrs.xoa_system);
5252 FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
5253 xvap.xva_xoptattrs.xoa_hidden);
5254 FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
5255 xvap.xva_xoptattrs.xoa_reparse);
5256 FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
5257 xvap.xva_xoptattrs.xoa_offline);
5258 FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
5259 xvap.xva_xoptattrs.xoa_sparse);
5262 if (vap->va_birthtime.tv_sec != VNOVAL) {
5263 xvap.xva_vattr.va_mask |= AT_XVATTR;
5264 XVA_SET_REQ(&xvap, XAT_CREATETIME);
5266 return (zfs_setattr(vp, (vattr_t *)&xvap, 0, cred, NULL));
5270 zfs_freebsd_rename(ap)
5271 struct vop_rename_args /* {
5272 struct vnode *a_fdvp;
5273 struct vnode *a_fvp;
5274 struct componentname *a_fcnp;
5275 struct vnode *a_tdvp;
5276 struct vnode *a_tvp;
5277 struct componentname *a_tcnp;
5280 vnode_t *fdvp = ap->a_fdvp;
5281 vnode_t *fvp = ap->a_fvp;
5282 vnode_t *tdvp = ap->a_tdvp;
5283 vnode_t *tvp = ap->a_tvp;
5286 ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
5287 ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
5289 error = zfs_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
5290 ap->a_tcnp, ap->a_fcnp->cn_cred);
5302 zfs_freebsd_symlink(ap)
5303 struct vop_symlink_args /* {
5304 struct vnode *a_dvp;
5305 struct vnode **a_vpp;
5306 struct componentname *a_cnp;
5307 struct vattr *a_vap;
5311 struct componentname *cnp = ap->a_cnp;
5312 vattr_t *vap = ap->a_vap;
5314 ASSERT(cnp->cn_flags & SAVENAME);
5316 vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
5317 vattr_init_mask(vap);
5319 return (zfs_symlink(ap->a_dvp, ap->a_vpp, cnp->cn_nameptr, vap,
5320 ap->a_target, cnp->cn_cred, cnp->cn_thread));
5324 zfs_freebsd_readlink(ap)
5325 struct vop_readlink_args /* {
5328 struct ucred *a_cred;
5332 return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL));
5336 zfs_freebsd_link(ap)
5337 struct vop_link_args /* {
5338 struct vnode *a_tdvp;
5340 struct componentname *a_cnp;
5343 struct componentname *cnp = ap->a_cnp;
5344 vnode_t *vp = ap->a_vp;
5345 vnode_t *tdvp = ap->a_tdvp;
5347 if (tdvp->v_mount != vp->v_mount)
5350 ASSERT(cnp->cn_flags & SAVENAME);
5352 return (zfs_link(tdvp, vp, cnp->cn_nameptr, cnp->cn_cred, NULL, 0));
5356 zfs_freebsd_inactive(ap)
5357 struct vop_inactive_args /* {
5359 struct thread *a_td;
5362 vnode_t *vp = ap->a_vp;
5364 zfs_inactive(vp, ap->a_td->td_ucred, NULL);
5369 zfs_freebsd_reclaim(ap)
5370 struct vop_reclaim_args /* {
5372 struct thread *a_td;
5375 vnode_t *vp = ap->a_vp;
5376 znode_t *zp = VTOZ(vp);
5377 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5381 /* Destroy the vm object and flush associated pages. */
5382 vnode_destroy_vobject(vp);
5385 * z_teardown_inactive_lock protects from a race with
5386 * zfs_znode_dmu_fini in zfsvfs_teardown during
5389 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
5390 if (zp->z_sa_hdl == NULL)
5394 rw_exit(&zfsvfs->z_teardown_inactive_lock);
5402 struct vop_fid_args /* {
5408 return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
5412 zfs_freebsd_pathconf(ap)
5413 struct vop_pathconf_args /* {
5416 register_t *a_retval;
5422 error = zfs_pathconf(ap->a_vp, ap->a_name, &val, curthread->td_ucred, NULL);
5424 *ap->a_retval = val;
5425 else if (error == EOPNOTSUPP)
5426 error = vop_stdpathconf(ap);
5431 zfs_freebsd_fifo_pathconf(ap)
5432 struct vop_pathconf_args /* {
5435 register_t *a_retval;
5439 switch (ap->a_name) {
5440 case _PC_ACL_EXTENDED:
5442 case _PC_ACL_PATH_MAX:
5443 case _PC_MAC_PRESENT:
5444 return (zfs_freebsd_pathconf(ap));
5446 return (fifo_specops.vop_pathconf(ap));
5451 * FreeBSD's extended attribute namespaces are mapped onto file name
5452 * prefixes of the ZFS extended attribute name:
5455 * system freebsd:system:
5456 * user (none, can be used to access ZFS fsattr(5) attributes
5457 * created on Solaris)
5460 zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
5463 const char *namespace, *prefix, *suffix;
5465 /* We don't allow the '/' character in an attribute name. */
5466 if (strchr(name, '/') != NULL)
5468 /* We don't allow attribute names that start with the "freebsd:" prefix. */
5469 if (strncmp(name, "freebsd:", 8) == 0)
5472 bzero(attrname, size);
5474 switch (attrnamespace) {
5475 case EXTATTR_NAMESPACE_USER:
5477 prefix = "freebsd:";
5478 namespace = EXTATTR_NAMESPACE_USER_STRING;
5482 * This is the default namespace by which we can access all
5483 * attributes created on Solaris.
5485 prefix = namespace = suffix = "";
5488 case EXTATTR_NAMESPACE_SYSTEM:
5489 prefix = "freebsd:";
5490 namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
5493 case EXTATTR_NAMESPACE_EMPTY:
5497 if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
5499 return (ENAMETOOLONG);
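/*
 * Illustrative results of the mapping above (assuming the stock
 * FreeBSD namespace strings "user" and "system"):
 *
 *	EXTATTR_NAMESPACE_USER,   "foo" -> "freebsd:user:foo"
 *	EXTATTR_NAMESPACE_SYSTEM, "foo" -> "freebsd:system:foo"
 *	EXTATTR_NAMESPACE_EMPTY,  "foo" -> "foo"	(Solaris compatible)
 */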
5505 * Vnode operation to retrieve a named extended attribute.
5508 zfs_getextattr(struct vop_getextattr_args *ap)
5511 IN struct vnode *a_vp;
5512 IN int a_attrnamespace;
5513 IN const char *a_name;
5514 INOUT struct uio *a_uio;
5516 IN struct ucred *a_cred;
5517 IN struct thread *a_td;
5521 zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
5522 struct thread *td = ap->a_td;
5523 struct nameidata nd;
5526 vnode_t *xvp = NULL, *vp;
5529 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
5530 ap->a_cred, ap->a_td, VREAD);
5534 error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
5541 error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
5549 NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
5551 error = vn_open_cred(&nd, &flags, 0, 0, ap->a_cred, NULL);
5553 NDFREE(&nd, NDF_ONLY_PNBUF);
5556 if (error == ENOENT)
5561 if (ap->a_size != NULL) {
5562 error = VOP_GETATTR(vp, &va, ap->a_cred);
5564 *ap->a_size = (size_t)va.va_size;
5565 } else if (ap->a_uio != NULL)
5566 error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
5569 vn_close(vp, flags, ap->a_cred, td);

/*
 * Vnode operation to remove a named attribute.
 */
int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	vnode_t *xvp = NULL, *vp;
	int error;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
	    UIO_SYSSPACE, attrname, xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (error == ENOENT)
			error = ENOATTR;
		return (error);
	}

	error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	vput(nd.ni_dvp);
	if (vp == nd.ni_dvp)
		vrele(vp);
	else
		vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}
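
/*
 * Example (illustrative, not compiled): userland removal of an attribute
 * reaches the VOP_REMOVE() call above, which unlinks the backing file from
 * the hidden extended attribute directory:
 *
 *	extattr_delete_file("f", EXTATTR_NAMESPACE_USER, "md5");
 */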

/*
 * Vnode operation to set a named attribute.
 */
static int
zfs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	struct vattr va;
	vnode_t *xvp = NULL, *vp;
	int error, flags;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR | CREATE_XATTR_DIR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	flags = FFLAGS(O_WRONLY | O_CREAT);
	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
	    xvp, td);
	error = vn_open_cred(&nd, &flags, 0600, 0, ap->a_cred, NULL);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/* Truncate the backing file first, so a set replaces the old value. */
	VATTR_NULL(&va);
	va.va_size = 0;
	error = VOP_SETATTR(vp, &va, ap->a_cred);
	if (error == 0)
		VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);

	VOP_UNLOCK(vp, 0);
	vn_close(vp, flags, ap->a_cred, td);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
zfs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrprefix[16];
	u_char dirbuf[sizeof(struct dirent)];
	struct dirent *dp;
	struct iovec aiov;
	struct uio auio, *uio = ap->a_uio;
	size_t *sizep = ap->a_size;
	size_t plen;
	vnode_t *xvp = NULL, *vp;
	int done, error, eof, pos;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
	    sizeof(attrprefix));
	if (error != 0)
		return (error);
	plen = strlen(attrprefix);

	ZFS_ENTER(zfsvfs);

	if (sizep != NULL)
		*sizep = 0;

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		/*
		 * ENOATTR means that the EA directory does not yet exist,
		 * i.e. there are no extended attributes there.
		 */
		if (error == ENOATTR)
			error = 0;
		return (error);
	}

	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
	    UIO_SYSSPACE, ".", xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;

	do {
		u_char nlen;

		aiov.iov_base = (void *)dirbuf;
		aiov.iov_len = sizeof(dirbuf);
		auio.uio_resid = sizeof(dirbuf);
		error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
		done = sizeof(dirbuf) - auio.uio_resid;
		if (error != 0)
			break;
		for (pos = 0; pos < done;) {
			dp = (struct dirent *)(dirbuf + pos);
			pos += dp->d_reclen;
			/*
			 * XXX: Temporarily we also accept DT_UNKNOWN, as this
			 * is what we get when the attribute was created on
			 * Solaris.
			 */
			if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
				continue;
			if (plen == 0 &&
			    strncmp(dp->d_name, "freebsd:", 8) == 0)
				continue;
			else if (strncmp(dp->d_name, attrprefix, plen) != 0)
				continue;
			nlen = dp->d_namlen - plen;
			if (sizep != NULL)
				*sizep += 1 + nlen;
			else if (uio != NULL) {
				/*
				 * Format of extattr name entry is one byte
				 * for length and the rest for name.
				 */
				error = uiomove(&nlen, 1, uio->uio_rw, uio);
				if (error == 0) {
					error = uiomove(dp->d_name + plen,
					    nlen, uio->uio_rw, uio);
				}
				if (error != 0)
					break;
			}
		}
	} while (!eof && error == 0);

	vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}
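
/*
 * Example (illustrative, not compiled): the length-prefixed records emitted
 * above are exactly what extattr_list_file(2) hands back to userland, which
 * walks the buffer like this:
 *
 *	for (pos = 0; pos < nbytes; pos += 1 + len) {
 *		len = (unsigned char)buf[pos];
 *		printf("%.*s\n", len, buf + pos + 1);
 *	}
 */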

int
zfs_freebsd_getacl(ap)
	struct vop_getacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int		error;
	vsecattr_t	vsecattr;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
	error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL);
	if (error != 0)
		return (error);

	error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
	    vsecattr.vsa_aclcnt);
	if (vsecattr.vsa_aclentp != NULL)
		kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);

	return (error);
}

int
zfs_freebsd_setacl(ap)
	struct vop_setacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int		error;
	vsecattr_t	vsecattr;
	int		aclbsize;	/* size of acl list in bytes */
	aclent_t	*aaclp;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	if (ap->a_aclp == NULL)
		return (EINVAL);

	if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
		return (EINVAL);

	/*
	 * With NFSv4 ACLs, chmod(2) may need to add additional entries,
	 * splitting every entry into two and appending "canonical six"
	 * entries at the end.  Don't allow for setting an ACL that would
	 * cause chmod(2) to run out of ACL entries.
	 */
	if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
		return (ENOSPC);
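
	/*
	 * Worked example (assuming FreeBSD's ACL_MAX_ENTRIES of 254): the
	 * worst-case chmod(2) expansion of an n-entry ACL is 2n + 6 entries,
	 * so the largest ACL accepted here has (254 - 6) / 2 = 124 entries.
	 */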

	error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
	if (error != 0)
		return (error);

	vsecattr.vsa_mask = VSA_ACE;
	aclbsize = ap->a_aclp->acl_cnt * sizeof(ace_t);
	vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
	aaclp = vsecattr.vsa_aclentp;
	vsecattr.vsa_aclentsz = aclbsize;

	aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
	error = zfs_setsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL);
	kmem_free(aaclp, aclbsize);

	return (error);
}

int
zfs_freebsd_aclcheck(ap)
	struct vop_aclcheck_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{

	return (EOPNOTSUPP);
}

static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
	vnode_t *covered_vp;
	vnode_t *vp = ap->a_vp;
	zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
	znode_t *zp = VTOZ(vp);
	int ltype;
	int error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/*
	 * If we are a snapshot mounted under .zfs, run the operation
	 * on the covered vnode.
	 */
	if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
		char name[MAXNAMLEN + 1];
		znode_t *dzp;
		size_t len;

		error = zfs_znode_parent_and_name(zp, &dzp, name);
		if (error == 0) {
			len = strlen(name);
			if (*ap->a_buflen < len)
				error = SET_ERROR(ENOMEM);
		}
		if (error == 0) {
			/* The component is copied in at the buffer's tail. */
			*ap->a_buflen -= len;
			bcopy(name, ap->a_buf + *ap->a_buflen, len);
			*ap->a_vpp = ZTOV(dzp);
		}
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	ZFS_EXIT(zfsvfs);

	covered_vp = vp->v_mount->mnt_vnodecovered;
	vhold(covered_vp);
	ltype = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
	if (error == 0) {
		error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
		    ap->a_buf, ap->a_buflen);
		vput(covered_vp);
	}
	vn_lock(vp, ltype | LK_RETRY);
	if ((vp->v_iflag & VI_DOOMED) != 0)
		error = SET_ERROR(ENOENT);
	return (error);
}
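
/*
 * zfs_vptocnp() resolves a vnode to one pathname component at a time for
 * reverse lookups such as vn_fullpath(9); each call prepends its component
 * at the tail of the caller's buffer, so *a_buflen only ever shrinks.
 */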

#ifdef DIAGNOSTIC
static int
zfs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	vnode_t *vp;
	znode_t *zp;
	int err;

	err = vop_stdlock(ap);
	if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) {
		vp = ap->a_vp;
		zp = vp->v_data;
		/*
		 * Taking a vnode lock while holding the teardown lock can
		 * deadlock against unmount, so assert it is not held here.
		 */
		if (vp->v_mount != NULL && (vp->v_iflag & VI_DOOMED) == 0 &&
		    zp != NULL && (zp->z_pflags & ZFS_XATTR) == 0)
			VERIFY(!RRM_LOCK_HELD(&zp->z_zfsvfs->z_teardown_lock));
	}
	return (err);
}
#endif

struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;

struct vop_vector zfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_access =		zfs_freebsd_access,
	.vop_lookup =		zfs_cache_lookup,
	.vop_cachedlookup =	zfs_freebsd_lookup,
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_create =		zfs_freebsd_create,
	.vop_mknod =		zfs_freebsd_create,
	.vop_mkdir =		zfs_freebsd_mkdir,
	.vop_readdir =		zfs_freebsd_readdir,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_open =		zfs_freebsd_open,
	.vop_close =		zfs_freebsd_close,
	.vop_rmdir =		zfs_freebsd_rmdir,
	.vop_ioctl =		zfs_freebsd_ioctl,
	.vop_link =		zfs_freebsd_link,
	.vop_symlink =		zfs_freebsd_symlink,
	.vop_readlink =		zfs_freebsd_readlink,
	.vop_read =		zfs_freebsd_read,
	.vop_write =		zfs_freebsd_write,
	.vop_remove =		zfs_freebsd_remove,
	.vop_rename =		zfs_freebsd_rename,
	.vop_pathconf =		zfs_freebsd_pathconf,
	.vop_bmap =		zfs_freebsd_bmap,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getextattr =	zfs_getextattr,
	.vop_deleteextattr =	zfs_deleteextattr,
	.vop_setextattr =	zfs_setextattr,
	.vop_listextattr =	zfs_listextattr,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
	.vop_getpages =		zfs_freebsd_getpages,
	.vop_putpages =		zfs_freebsd_putpages,
	.vop_vptocnp =		zfs_vptocnp,
#ifdef DIAGNOSTIC
	.vop_lock1 =		zfs_lock,
#endif
};

struct vop_vector zfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_access =		zfs_freebsd_access,
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_read =		VOP_PANIC,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_write =		VOP_PANIC,
	.vop_pathconf =		zfs_freebsd_fifo_pathconf,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
};

/*
 * Special "share" hidden-files vnode operations template.
 */
struct vop_vector zfs_shareops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		zfs_freebsd_access,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_fid =		zfs_freebsd_fid,
	.vop_pathconf =		zfs_freebsd_pathconf,
};