 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vnode.h>
#include <sys/taskq.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/sf_buf.h>
#include <sys/sched.h>
#include <vm/vm_pageout.h>
/*
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 * (1) A check must be made in each zfs thread for a mounted file system.
 *     This is done, while avoiding races, using ZFS_ENTER(zfsvfs).
 *     A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *     must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *     can return EIO from the calling function.
 *
 * (2) VN_RELE() should always be the last thing except for zil_commit()
 *     (if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *     First, if it's the last reference, the vnode/znode
 *     can be freed, so the zp may point to freed memory.  Second, the last
 *     reference will call zfs_zinactive(), which may induce a lot of work --
 *     pushing cached pages (which acquires range locks) and syncing out
 *     cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *     which could deadlock the system if you were already holding one.
 *     If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *     as they can span dmu_tx_assign() calls.
 *
 * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *     This is critical because we don't want to block while holding locks.
 *     Note, in particular, that if a lock is sometimes acquired before
 *     the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *     use a non-blocking assign can deadlock the system.  The scenario:
 *
 *     Thread A has grabbed a lock before calling dmu_tx_assign().
 *     Thread B is in an already-assigned tx, and blocks for this lock.
 *     Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *     forever, because the previous txg can't quiesce until B's tx commits.
 *
 *     If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *     then drop all locks, call dmu_tx_wait(), and try again.
 *
 * (5) If the operation succeeded, generate the intent log entry for it
 *     before dropping locks.  This ensures that the ordering of events
 *     in the intent log matches the order in which they actually occurred.
 *     During ZIL replay the zfs_log_* functions will update the sequence
 *     number to indicate that the zil transaction has been replayed.
 *
 * (6) At the end of each vnode op, the DMU tx must always commit,
 *     regardless of whether there were any errors.
 *
 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *     to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		VN_RELE(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	VN_RELE(...);			// release held vnodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(*vpp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & FAPPEND) == 0)) {

		return (SET_ERROR(EPERM));
	}

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (fs_vscan(*vpp, cr, 0) != 0) {

			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & (FSYNC | FDSYNC))
		atomic_inc_32(&zp->z_sync_cnt);
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	/*
	 * Clean up any locks held by this process on the vp.
	 */
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());

	/* Decrement the synchronous opens in the znode */
	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(fs_vscan(vp, cr, 1) == 0);
/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA).  "off" is an in/out parameter.
 */
zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
{
	znode_t	*zp = VTOZ(vp);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (SET_ERROR(ENXIO));
	}

	if (cmd == _FIO_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);

	if ((error == ESRCH) || (noff > file_sz)) {
		/*
		 * Handle the virtual hole at the end of file.
		 */
		if (hole) {
			*off = file_sz;
			return (0);
		}
		return (SET_ERROR(ENXIO));
	}
zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
    int *rvalp, caller_context_t *ct)
{

	/*
	 * The following two ioctls are used by bfu.  Faking them out
	 * is necessary to avoid bfu errors.
	 */
	case _FIO_SEEK_DATA:
	case _FIO_SEEK_HOLE:
#ifdef sun
		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
			return (SET_ERROR(EFAULT));
#else
		off = *(offset_t *)data;
#endif

		zfsvfs = zp->z_zfsvfs;

		/* offset parameter is in/out */
		error = zfs_holey(vp, com, &off);
#ifdef sun
		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
			return (SET_ERROR(EFAULT));
#else
		*(offset_t *)data = off;
#endif
		return (0);
	}
	return (SET_ERROR(ENOTTY));
}
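
/*
 * Illustrative sketch (not part of the original file): how the
 * _FIO_SEEK_HOLE/_FIO_SEEK_DATA support above is typically consumed
 * from userland via lseek(2).  This assumes a platform that maps
 * SEEK_HOLE/SEEK_DATA onto these ioctls; the function and usage are
 * hypothetical.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void
walk_extents(const char *path)
{
	int fd = open(path, O_RDONLY);
	off_t data, hole = 0;

	if (fd == -1)
		return;
	for (;;) {
		/* Find the start of the next data region. */
		data = lseek(fd, hole, SEEK_DATA);
		if (data == -1)
			break;	/* ENXIO: no more data before EOF */
		/* Find where that data region ends (the next hole). */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole == -1)
			break;
		printf("data: [%jd, %jd)\n", (intmax_t)data, (intmax_t)hole);
	}
	(void) close(fd);
}
#endif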
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{

	VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);

	if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&

		if ((pp->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and
			 * sleeping so that the page daemon is less
			 * likely to reclaim it.
			 */
			vm_page_reference(pp);
			vm_page_sleep(pp, "zfsmwb");
		}

	} else if (pp == NULL) {
		pp = vm_page_alloc(obj, OFF_TO_IDX(start),
		    VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |

		ASSERT(pp != NULL && !pp->valid);

	ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
	vm_object_pip_add(obj, 1);
	vm_page_io_start(pp);
	pmap_remove_write(pp);
	vm_page_clear_dirty(pp, off, nbytes);

page_unbusy(vm_page_t pp)
{

	vm_page_io_finish(pp);
	vm_object_pip_subtract(pp->object, 1);

page_hold(vnode_t *vp, int64_t start)
{

	VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED);

	if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&

		if ((pp->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and
			 * sleeping so that the page daemon is less
			 * likely to reclaim it.
			 */
			vm_page_reference(pp);
			vm_page_sleep(pp, "zfsmwb");
		}

	ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);

page_unhold(vm_page_t pp)
{

zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
{

	*sfp = sf_buf_alloc(pp, 0);
	return ((caddr_t)sf_buf_kva(*sfp));

zfs_unmap_page(struct sf_buf *sf)
{
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
    int segflg, dmu_tx_t *tx)
{

	ASSERT(vp->v_mount != NULL);

	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {

		int nbytes = imin(PAGESIZE - off, len);

		if (segflg == UIO_NOCOPY) {
			pp = vm_page_lookup(obj, OFF_TO_IDX(start));

			    ("zfs update_pages: NULL page in putpages case"));

			    ("zfs update_pages: unaligned data in putpages case"));
			KASSERT(pp->valid == VM_PAGE_BITS_ALL,
			    ("zfs update_pages: invalid page in putpages case"));
			KASSERT(pp->busy > 0,
			    ("zfs update_pages: unbusy page in putpages case"));
			KASSERT(!pmap_page_is_write_mapped(pp),
			    ("zfs update_pages: writable page in putpages case"));
			VM_OBJECT_UNLOCK(obj);

			va = zfs_map_page(pp, &sf);
			(void) dmu_write(os, oid, start, nbytes, va, tx);

		} else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
			VM_OBJECT_UNLOCK(obj);

			va = zfs_map_page(pp, &sf);
			(void) dmu_read(os, oid, start+off, nbytes,
			    va+off, DMU_READ_PREFETCH);
	if (segflg != UIO_NOCOPY)
		vm_object_pip_wakeupn(obj, 0);
	VM_OBJECT_UNLOCK(obj);

/*
 * Read with UIO_NOCOPY flag means that sendfile(2) requests
 * ZFS to populate a range of page cache pages with data.
 *
 * NOTE: this function could be optimized to pre-allocate
 * all pages in advance, drain VPO_BUSY on all of them,
 * map them into contiguous KVA region and populate them
 * in one single dmu_read() call.
 */
mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;

	ASSERT(uio->uio_segflg == UIO_NOCOPY);
	ASSERT(vp->v_mount != NULL);

	ASSERT((uio->uio_loffset & PAGEOFFSET) == 0);

	for (start = uio->uio_loffset; len > 0; start += PAGESIZE) {
		int bytes = MIN(PAGESIZE, len);

		pp = vm_page_grab(obj, OFF_TO_IDX(start), VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_IGN_SBUSY);
		if (pp->valid == 0) {
			vm_page_io_start(pp);
			VM_OBJECT_UNLOCK(obj);
			va = zfs_map_page(pp, &sf);
			error = dmu_read(os, zp->z_id, start, bytes, va,

			if (bytes != PAGESIZE && error == 0)
				bzero(va + bytes, PAGESIZE - bytes);

			vm_page_io_finish(pp);

			pp->valid = VM_PAGE_BITS_ALL;
			vm_page_activate(pp);

		uio->uio_resid -= bytes;
		uio->uio_offset += bytes;

	VM_OBJECT_UNLOCK(obj);
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default to the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
mappedread(vnode_t *vp, int nbytes, uio_t *uio)
{
	znode_t *zp = VTOZ(vp);
	objset_t *os = zp->z_zfsvfs->z_os;

	ASSERT(vp->v_mount != NULL);

	start = uio->uio_loffset;
	off = start & PAGEOFFSET;
	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {

		uint64_t bytes = MIN(PAGESIZE - off, len);

		if (pp = page_hold(vp, start)) {

			VM_OBJECT_UNLOCK(obj);
			va = zfs_map_page(pp, &sf);
			error = uiomove(va + off, bytes, UIO_READ, uio);

			VM_OBJECT_UNLOCK(obj);
			error = dmu_read_uio(os, zp->z_id, uio, bytes);

	VM_OBJECT_UNLOCK(obj);
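
/*
 * Illustrative sketch (not part of the original file): the coherence
 * contract that update_pages() and mappedread() implement, seen from
 * userland.  A write(2) through the DMU must be visible through a
 * shared mapping of the same range, and a store through the mapping
 * must be visible to read(2).  The path and 4 KB size are hypothetical.
 */
#if 0
#include <sys/mman.h>
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void
mmap_coherence_demo(const char *path)
{
	char buf[3];
	char *p;
	int fd = open(path, O_RDWR);

	assert(fd != -1);
	assert(ftruncate(fd, 4096) == 0);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	assert(p != MAP_FAILED);

	/* Write through the file descriptor (DMU path)... */
	assert(pwrite(fd, "zfs", 3, 0) == 3);
	/* ...and the mapped page observes it (update_pages). */
	assert(memcmp(p, "zfs", 3) == 0);

	/* Store through the mapping... */
	p[0] = 'Z';
	/* ...and read(2) observes it (mappedread). */
	assert(pread(fd, buf, 3, 0) == 3);
	assert(buf[0] == 'Z');

	(void) munmap(p, 4096);
	(void) close(fd);
}
#endif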
offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
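
/*
 * Illustrative sketch (not part of the original file): how zfs_read()
 * below slices a request at zfs_read_chunk_size boundaries.  For a
 * power-of-two chunk size, P2PHASE(x, size) is x % size, so the first
 * chunk is trimmed to end on a chunk boundary and every later chunk is
 * boundary-aligned.  E.g. with the default 1 MiB chunk, a 3 MiB read
 * starting at offset 1.5 MiB is issued as 512 KiB + 1 MiB + 1 MiB +
 * 512 KiB.  The helper and macro names below are hypothetical.
 */
#if 0
#include <stdint.h>

#define	P2PHASE_DEMO(x, align)	((x) & ((align) - 1))
#define	MIN_DEMO(a, b)		((a) < (b) ? (a) : (b))

static void
split_read(uint64_t off, uint64_t resid, uint64_t chunk)
{
	while (resid > 0) {
		uint64_t nbytes =
		    MIN_DEMO(resid, chunk - P2PHASE_DEMO(off, chunk));
		/* a real implementation would issue a read of
		 * nbytes at offset off here */
		off += nbytes;
		resid -= nbytes;
	}
}
#endif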
/*
 * Read bytes from specified file into supplied buffer.
 *
 *	IN:	vp	- vnode of file to be read from.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Side Effects:
 *		vp - atime updated if byte count > 0
 */
zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	if (zp->z_pflags & ZFS_AV_QUARANTINED) {

		return (SET_ERROR(EACCES));
	}

	/*
	 * Validate file offset
	 */
	if (uio->uio_loffset < (offset_t)0) {

		return (SET_ERROR(EINVAL));
	}

	/*
	 * Fasttrack empty reads
	 */
	if (uio->uio_resid == 0) {

	/*
	 * Check for mandatory locks
	 */
	if (MANDMODE(zp->z_mode)) {
		if (error = chklock(vp, FREAD,
		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {

	/*
	 * If we're in FRSYNC mode, sync out this znode before reading it.
	 */

	    (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
		zil_commit(zfsvfs->z_log, zp->z_id);

	/*
	 * Lock the range against changes.
	 */
	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);

	/*
	 * If we are reading past end-of-file we can skip
	 * to the end; but we might still need to set atime.
	 */
	if (uio->uio_loffset >= zp->z_size) {

	ASSERT(uio->uio_loffset < zp->z_size);
	n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {

		int blksz = zp->z_blksz;
		uint64_t offset = uio->uio_loffset;

		xuio = (xuio_t *)uio;

		nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,

			ASSERT(offset + n <= blksz);

		(void) dmu_xuio_init(xuio, nblk);

		if (vn_has_cached_data(vp)) {
			/*
			 * For simplicity, we always allocate a full buffer
			 * even if we only expect to read a portion of a block.
			 */
			while (--nblk >= 0) {
				(void) dmu_xuio_add(xuio,
				    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),

		nbytes = MIN(n, zfs_read_chunk_size -
		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

#ifdef __FreeBSD__
		if (uio->uio_segflg == UIO_NOCOPY)
			error = mappedread_sf(vp, nbytes, uio);
		else
#endif /* __FreeBSD__ */
		if (vn_has_cached_data(vp))
			error = mappedread(vp, nbytes, uio);
		else
			error = dmu_read_uio(os, zp->z_id, uio, nbytes);

		/* convert checksum errors into IO errors */
		if (error == ECKSUM)
			error = SET_ERROR(EIO);

	zfs_range_unlock(rl);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
/*
 * Write the bytes to a file.
 *
 *	IN:	vp	- vnode of file to be written to.
 *		uio	- structure supplying write location, range info,
 *			  and data buffer.
 *		ioflag	- FAPPEND, FSYNC, and/or FDSYNC.  FAPPEND is
 *			  set if in append mode.
 *		cr	- credentials of caller.
 *		ct	- caller context (NFS/CIFS fem monitor only)
 *
 *	OUT:	uio	- updated offset and range.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		vp - ctime|mtime updated if byte count > 0
 */
zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	rlim64_t	limit = MAXOFFSET_T;
	ssize_t		start_resid = uio->uio_resid;

	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	int		max_blksz = zfsvfs->z_max_blksz;

	iovec_t		*aiov = NULL;

	int		iovcnt = uio->uio_iovcnt;
	iovec_t		*iovp = uio->uio_iov;

	sa_bulk_attr_t	bulk[4];
	uint64_t	mtime[2], ctime[2];

	/*
	 * Fasttrack empty write
	 */

	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);

	/*
	 * If immutable or not appending then return EPERM
	 */
	if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
	    ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
	    (uio->uio_loffset < zp->z_size))) {

		return (SET_ERROR(EPERM));
	}

	zilog = zfsvfs->z_log;

	/*
	 * Validate file offset
	 */
	woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
	if (woff < 0) {

		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check for mandatory locks before calling zfs_range_lock()
	 * in order to prevent a deadlock with locks set via fcntl().
	 */
	if (MANDMODE((mode_t)zp->z_mode) &&
	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
	/*
	 * Pre-fault the pages to ensure slow (eg NFS) pages
	 * don't hold up txg.
	 * Skip this if uio contains loaned arc_buf.
	 */
	if ((uio->uio_extflg == UIO_XUIO) &&
	    (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
		xuio = (xuio_t *)uio;
	else
		uio_prefaultpages(MIN(n, max_blksz), uio);

	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
		 * semantics.  We reset the write offset once we have the lock.
		 */
		rl = zfs_range_lock(zp, 0, n, RL_APPEND);

		if (rl->r_len == UINT64_MAX) {
			/*
			 * We overlocked the file because this write will cause
			 * the file block size to increase.
			 * Note that zp_size cannot change with this lock held.
			 */
			woff = zp->z_size;
		}
		uio->uio_loffset = woff;
	} else {
		/*
		 * Note that if the file block size will change as a result of
		 * this write, then this range lock will lock the entire file
		 * so that we can re-write the block safely.
		 */
		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
	}

	if (vn_rlimit_fsize(vp, uio, uio->uio_td)) {
		zfs_range_unlock(rl);

	if (woff >= limit) {
		zfs_range_unlock(rl);

		return (SET_ERROR(EFBIG));
	}

	if ((woff + n) > limit || woff > (limit - n))
		n = limit - woff;

	/* Will this write extend the file length? */
	write_eof = (woff + n > zp->z_size);

	end_size = MAX(zp->z_size, woff + n);

	/*
	 * Write the file in reasonable size chunks.  Each chunk is written
	 * in a separate transaction; this keeps the intent log records small
	 * and allows us to do more fine-grained space accounting.
	 */
	while (n > 0) {
		woff = uio->uio_loffset;
again:
		if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
		    zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			error = SET_ERROR(EDQUOT);
			break;
		}

		if (xuio && abuf == NULL) {
			ASSERT(i_iov < iovcnt);
			aiov = &iovp[i_iov];
			abuf = dmu_xuio_arcbuf(xuio, i_iov);
			dmu_xuio_clear(xuio, i_iov);
			DTRACE_PROBE3(zfs_cp_write, int, i_iov,
			    iovec_t *, aiov, arc_buf_t *, abuf);
			ASSERT((aiov->iov_base == abuf->b_data) ||
			    ((char *)aiov->iov_base - (char *)abuf->b_data +
			    aiov->iov_len == arc_buf_size(abuf)));
			i_iov++;
		} else if (abuf == NULL && n >= max_blksz &&
		    woff >= zp->z_size &&
		    P2PHASE(woff, max_blksz) == 0 &&
		    zp->z_blksz == max_blksz) {
			/*
			 * This write covers a full block.  "Borrow" a buffer
			 * from the dmu so that we can fill it before we enter
			 * a transaction.  This avoids the possibility of
			 * holding up the transaction if the data copy hangs
			 * up on a pagefault (e.g., from an NFS server mapping).
			 */
			abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
			    max_blksz);
			ASSERT(abuf != NULL);
			ASSERT(arc_buf_size(abuf) == max_blksz);
			if (error = uiocopy(abuf->b_data, max_blksz,
			    UIO_WRITE, uio, &cbytes)) {
				dmu_return_arcbuf(abuf);
				break;
			}
			ASSERT(cbytes == max_blksz);
		}

		/*
		 * Start a transaction.
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (woff + nbytes > zp->z_size)
			vnode_pager_setsize(vp, woff + nbytes);

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {

				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && vn_has_cached_data(vp)) {
			update_pages(vp, woff, tx_bytes, zfsvfs->z_os,
			    zp->z_id, uio->uio_segflg, tx);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
		 *
		 * Note: we don't call zfs_fuid_map_id() here because
		 * user 0 is not an ephemeral uid.
		 */
		mutex_enter(&zp->z_acl_lock);
		if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
		    (S_IXUSR >> 6))) != 0 &&
		    (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
		    secpolicy_vnode_setid_retain(vp, cr,
		    (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
			uint64_t newmode;

			zp->z_mode &= ~(S_ISUID | S_ISGID);
			newmode = zp->z_mode;
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
			    (void *)&newmode, sizeof (uint64_t), tx);
		}
		mutex_exit(&zp->z_acl_lock);

		zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
		    B_TRUE);

		/*
		 * Update the file size (zp_size) if it has changed;
		 * account for possible concurrent updates.
		 */
		while ((end_size = zp->z_size) < uio->uio_loffset) {
			(void) atomic_cas_64(&zp->z_size, end_size,
			    uio->uio_loffset);
		}

		/*
		 * If we are replaying and eof is non zero then force
		 * the file size to the specified eof.  Note, there's no
		 * concurrency during replay.
		 */
		if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
			zp->z_size = zfsvfs->z_replay_eof;

		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
		dmu_tx_commit(tx);

		ASSERT(tx_bytes == nbytes);

			uio_prefaultpages(MIN(n, max_blksz), uio);
	}

	zfs_range_unlock(rl);

	/*
	 * If we're in replay mode, or we made no progress, return error.
	 * Otherwise, it's at least a partial write, so it's successful.
	 */
	if (zfsvfs->z_replay || uio->uio_resid == start_resid) {

	if (ioflag & (FSYNC | FDSYNC) ||
	    zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, zp->z_id);
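
/*
 * Illustrative sketch (not part of the original file): the ioflag test
 * above is what gives O_SYNC/O_DSYNC writes their durability -- each
 * write(2) on such a descriptor reaches zil_commit() before returning,
 * exactly as if the dataset had sync=always (os_sync == ZFS_SYNC_ALWAYS).
 * The userland function and path are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static ssize_t
durable_append(const char *path, const void *buf, size_t len)
{
	/* FSYNC ends up set in ioflag for this descriptor, so every
	 * write is committed to the intent log before returning. */
	int fd = open(path, O_WRONLY | O_APPEND | O_SYNC);
	ssize_t n;

	if (fd == -1)
		return (-1);
	n = write(fd, buf, len);
	(void) close(fd);
	return (n);
}
#endif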
zfs_get_done(zgd_t *zgd, int error)
{
	znode_t *zp = zgd->zgd_private;
	objset_t *os = zp->z_zfsvfs->z_os;

		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	vfslocked = VFS_LOCK_GIANT(zp->z_zfsvfs->z_vfs);
	/*
	 * Release the vnode asynchronously as we currently have the
	 * txg stopped from syncing.
	 */
	VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os)));

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
	VFS_UNLOCK_GIANT(vfslocked);
}

static int zil_fault_io = 0;
/*
 * Get data to generate a TX_WRITE intent log record.
 */
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zfsvfs_t *zfsvfs = arg;
	objset_t *os = zfsvfs->z_os;

	uint64_t object = lr->lr_foid;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT(zio != NULL);

	/*
	 * Nothing to do if the file has been removed
	 */
	if (zfs_zget(zfsvfs, object, &zp) != 0)
		return (SET_ERROR(ENOENT));
	if (zp->z_unlinked) {
		/*
		 * Release the vnode asynchronously as we currently have the
		 * txg stopped from syncing.
		 */
		VN_RELE_ASYNC(ZTOV(zp),
		    dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
		return (SET_ERROR(ENOENT));
	}

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zfsvfs->z_log;
	zgd->zgd_private = zp;

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
		/* test for truncation needs to be done while range locked */
		if (offset >= zp->z_size) {
			error = SET_ERROR(ENOENT);
		} else {
			error = dmu_read(os, object, offset, size, buf,
			    DMU_READ_NO_PREFETCH);
		}
		ASSERT(error == 0 || error == ENOENT);
	} else { /* indirect write */
		/*
		 * Have to lock the whole block to ensure when it's
		 * written out and its checksum is being calculated
		 * that no one can change the data.  We need to re-check
		 * blocksize after we get the lock in case it's changed!
		 */
		blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;

		zgd->zgd_rl = zfs_range_lock(zp, offset, size,
		    RL_READER);
		if (zp->z_blksz == size)

		zfs_range_unlock(zgd->zgd_rl);

		/* test for truncation needs to be done while range locked */
		if (lr->lr_offset >= zp->z_size)
			error = SET_ERROR(ENOENT);

			error = SET_ERROR(EIO);

		if (error == 0)
			error = dmu_buf_hold(os, object, offset, zgd, &db,
			    DMU_READ_NO_PREFETCH);

		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);

				ASSERT(BP_IS_HOLE(bp));

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zfs_get_done, zgd);
			ASSERT(error || lr->lr_length <= zp->z_blksz);

			/*
			 * On success, we need to wait for the write I/O
			 * initiated by dmu_sync() to complete before we can
			 * release this dbuf.  We will finish everything up
			 * in the zfs_get_done() callback.
			 */

		if (error == EALREADY) {
			lr->lr_common.lrc_txtype = TX_WRITE2;

	zfs_get_done(zgd, error);
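
/*
 * Illustrative sketch (not part of the original file): the immediate
 * vs. indirect decision described above, reduced to its essence.  The
 * threshold name is hypothetical; in ZFS the log writer makes this
 * choice (based on record size and logbias/slog configuration) before
 * zfs_get_data() is called, with buf != NULL meaning immediate and
 * buf == NULL meaning indirect.
 */
#if 0
#include <stdint.h>

typedef enum { WR_IMMEDIATE, WR_INDIRECT } wr_kind_t;

static wr_kind_t
choose_write_kind(uint64_t size, uint64_t immediate_write_sz)
{
	/*
	 * Small writes: copying the data into the log record is
	 * cheaper than syncing the block and logging a pointer to it.
	 */
	if (size <= immediate_write_sz)
		return (WR_IMMEDIATE);
	/* Large writes: sync the data once and log only a blkptr. */
	return (WR_INDIRECT);
}
#endif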
zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	if (flag & V_ACE_MASK)
		error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
	else
		error = zfs_zaccess_rwx(zp, mode, flag, cr);

/*
 * If vnode is for a device return a specfs vnode instead.
 */
specvp_check(vnode_t **vpp, cred_t *cr)
{

	if (IS_DEVVP(*vpp)) {

		svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);

			error = SET_ERROR(ENOSYS);
/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held vnode reference for it.
 *
 *	IN:	dvp	- vnode of directory to search.
 *		nm	- name of entry to lookup.
 *		pnp	- full pathname to lookup [UNUSED].
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		rdir	- root directory vnode [UNUSED].
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		direntflags - directory lookup flags
 *		realpnp - returned pathname.
 *
 *	OUT:	vpp	- vnode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 */
zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
    int nameiop, cred_t *cr, kthread_t *td, int flags)
{
	znode_t *zdp = VTOZ(dvp);
	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;

	int *direntflags = NULL;
	void *realpnp = NULL;

	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (dvp->v_type != VDIR) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);

			vnode_t *tvp = dnlc_lookup(dvp, nm);

				error = zfs_fastaccesschk_execute(zdp, cr);

				if (tvp == DNLC_NO_VNODE) {

					return (SET_ERROR(ENOENT));

				return (specvp_check(vpp, cr));

	DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm);
	if (flags & LOOKUP_XATTR) {

		/*
		 * If the xattr property is off, refuse the lookup request.
		 */
		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {

			return (SET_ERROR(EINVAL));
		}

		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {

			return (SET_ERROR(EINVAL));
		}
		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {

			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0,
		    B_FALSE, cr)) {
			VN_RELE(*vpp);
			*vpp = NULL;
		}

		return (error);
	}

	if (dvp->v_type != VDIR) {

		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */
	if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) {

		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
	if (error == 0)
		error = specvp_check(vpp, cr);

	/* Translate errors and add SAVENAME when needed. */
	if (cnp->cn_flags & ISLASTCN) {

		if (error == ENOENT) {
			error = EJUSTRETURN;
			cnp->cn_flags |= SAVENAME;
		}

		cnp->cn_flags |= SAVENAME;

	if (error == 0 && (nm[0] != '.' || nm[1] != '\0')) {

		if (cnp->cn_flags & ISDOTDOT) {
			ltype = VOP_ISLOCKED(dvp);

		error = zfs_vnode_lock(*vpp, cnp->cn_lkflags);
		if (cnp->cn_flags & ISDOTDOT)
			vn_lock(dvp, ltype | LK_RETRY);

#ifdef FREEBSD_NAMECACHE
	/*
	 * Insert name into cache (as non-existent) if appropriate.
	 */
	if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
		cache_enter(dvp, *vpp, cnp);
	/*
	 * Insert name into cache if appropriate.
	 */
	if (error == 0 && (cnp->cn_flags & MAKEENTRY)) {
		if (!(cnp->cn_flags & ISLASTCN) ||
		    (nameiop != DELETE && nameiop != RENAME)) {
			cache_enter(dvp, *vpp, cnp);
/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the vp of the created or trunc'd file.
 *
 *	IN:	dvp	- vnode of directory to put new file entry in.
 *		name	- name of new file entry.
 *		vap	- attributes of new file.
 *		excl	- flag indicating exclusive or non-exclusive mode.
 *		mode	- mode to open file with.
 *		cr	- credentials of caller.
 *		flag	- large file flag [UNUSED].
 *		ct	- caller context
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created or trunc'd entry.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		dvp - ctime|mtime updated if new entry created
 *		vp - ctime|mtime always, atime if new
 */
zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
    vnode_t **vpp, cred_t *cr, kthread_t *td)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;

	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || (vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {

			return (error);
		}
	}

	if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
		vap->va_mode &= ~S_ISVTX;

	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */

		/* possible VN_HOLD(zp) */

	if (flag & FIGNORECASE)

	error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		if (strcmp(name, "..") == 0)
			error = SET_ERROR(EISDIR);
		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);

			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) &&
		    (vap->va_type != VREG)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;

		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);

			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		(void) zfs_link_create(dl, zp, tx, ZNEW);
		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
	} else {
		int aflags = (flag & FAPPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);

		/*
		 * A directory entry already exists for this name.
		 */

		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */

			error = SET_ERROR(EEXIST);

		/*
		 * Can't open a directory for writing.
		 */
		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
			error = SET_ERROR(EISDIR);

		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {

		mutex_enter(&dzp->z_lock);

		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if ((ZTOV(zp)->v_type == VREG) &&
		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			zfs_dirent_unlock(dl);

			error = zfs_freesp(zp, 0, 0, mode, TRUE);

			vnevent_create(ZTOV(zp), ct);
		}
	}
out:
	if (dl)
		zfs_dirent_unlock(dl);

		error = specvp_check(vpp, cr);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
/*
 * Remove an entry from a directory.
 *
 *	IN:	dvp	- vnode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		dvp - ctime|mtime
 *		vp - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;
zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
    int flags)
{
	znode_t		*zp, *dzp = VTOZ(dvp);

	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;

	uint64_t	acl_obj, xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;

	boolean_t	may_delete_now, delete_now = FALSE;
	boolean_t	unlinked, toobig = FALSE;

	pathname_t	*realnmp = NULL;

	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (vp->v_type == VDIR) {
		error = SET_ERROR(EPERM);

	vnevent_remove(vp, dvp, name, ct);

	if (realnmp)
		dnlc_remove(dvp, realnmp->pn_buf);
	else
		dnlc_remove(dvp, name);

	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig =
		    zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);

		if (error == ERESTART) {

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
		    xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==

		panic("zfs_remove: delete_now branch taken");

		if (xattr_obj_unlinked) {
			ASSERT3U(xzp->z_links, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = 1;

			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &xzp->z_links, sizeof (xzp->z_links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);

		ASSERT0(vp->v_count);

		mutex_exit(&zp->z_lock);
		zfs_znode_delete(zp, tx);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);

		vp->v_vflag |= VV_NOSYNC;

	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
/*
 * Create a new directory and insert it into dvp using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *	IN:	dvp	- vnode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *		vsecp	- ACL to be set
 *
 *	OUT:	vpp	- vnode of created directory.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		dvp - ctime|mtime updated
 *		vp - ctime|mtime|atime updated
 */
zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
    caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	znode_t		*zp, *dzp = VTOZ(dvp);
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;

	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;

	ASSERT(vap->va_type == VDIR);

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */
	ksid = crgetsid(cr, KSID_OWNER);
	if (ksid)
		uid = ksid_getid(ksid);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || (vap->va_mask & AT_XVATTR) ||
	    IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {

		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {

		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & AT_XVATTR) {
		if ((error = secpolicy_xvattr(dvp, (xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_type)) != 0) {

			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
2134 * First make sure the new directory doesn't exist.
2136 * Existence is checked first to make sure we don't return
2137 * EACCES instead of EEXIST which can cause some applications
	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,

		zfs_acl_ids_free(&acl_ids);

		return (error);
	}

	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);

		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);

		return (error);
	}

	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/*
	 * Now put new name in parent dir.
	 */
	(void) zfs_link_create(dl, zp, tx, ZNEW);

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dvp	- vnode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- vnode of current working directory.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		dvp - ctime|mtime updated
 */
zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
    caller_context_t *ct, int flags)
{
	znode_t		*dzp = VTOZ(dvp);

	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;

	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

	if (error = zfs_zaccess_delete(dzp, zp, cr)) {

	if (vp->v_type != VDIR) {
		error = SET_ERROR(ENOTDIR);

	if (vp == cwd) {
		error = SET_ERROR(EINVAL);

	vnevent_rmdir(vp, dvp, name, ct);
	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);

		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}

#ifdef FREEBSD_NAMECACHE
	cache_purge(dvp);
#endif

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
	}

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
#ifdef FREEBSD_NAMECACHE
	cache_purge(vp);
#endif

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);
/*
 * Read as many directory entries as will fit into the provided
 * buffer from the given directory cursor position (specified in
 * the uio structure).
 *
 *	IN:	vp	- vnode of directory to read.
 *		uio	- structure supplying read location, range info,
 *			  and return buffer.
 *		cr	- credentials of caller.
 *		ct	- caller context
 *		flags	- case flags
 *
 *	OUT:	uio	- updated offset and range, buffer filled.
 *		eofp	- set to true if end-of-file detected.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 *	Timestamps:
 *		vp - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies,
    u_long **cookies)
{
	znode_t		*zp = VTOZ(vp);

	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;

	zap_attribute_t	zap;
	uint_t		bytes_wanted;
	uint64_t	offset; /* must be unsigned; checks for < 1 */

	boolean_t	check_sysattrs;

	u_long		*cooks = NULL;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0) {
	/*
	 * If we are not given an eof variable,
	 * use a local one.
	 */
	/*
	 * Check for valid iov_len.
	 */
	if (uio->uio_iov->iov_len <= 0) {

		return (SET_ERROR(EINVAL));
	}

	/*
	 * Quit if directory has been removed (posix)
	 */
	if ((*eofp = zp->z_unlinked) != 0) {

	offset = uio->uio_loffset;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */

		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);

		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);

	/*
	 * Get space to change directory entries into fs independent format.
	 */
	iovp = uio->uio_iov;
	bytes_wanted = iovp->iov_len;
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
		bufsize = bytes_wanted;
		outbuf = kmem_alloc(bufsize, KM_SLEEP);
		odp = (struct dirent64 *)outbuf;
	} else {
		bufsize = bytes_wanted;

		odp = (struct dirent64 *)iovp->iov_base;
	}
	eodp = (struct edirent *)odp;

	if (ncookies != NULL) {
		/*
		 * Minimum entry size is dirent size and 1 byte for a file name.
		 */
		ncooks = uio->uio_resid / (sizeof (struct dirent) -
		    sizeof (((struct dirent *)NULL)->d_name) + 1);
		cooks = malloc(ncooks * sizeof (u_long), M_TEMP, M_WAITOK);

	/*
	 * If this VFS supports the system attribute view interface; and
	 * we're looking at an extended attribute directory; and we care
	 * about normalization conflicts on this vfs; then we must check
	 * for normalization conflicts with the sysattr name space.
	 */
#ifdef TODO
	check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
	    (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
	    (flags & V_RDDIR_ENTFLAGS);
#else
	check_sysattrs = 0;
#endif

	/*
	 * Transform to file-system independent format
	 */
	while (outcount < bytes_wanted) {

		off64_t *next = NULL;

		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;

		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;

		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
		} else {

			if (error = zap_cursor_retrieve(&zc, &zap)) {
				if ((*eofp = (error == ENOENT)) != 0)

			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers != 1) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset);
				error = SET_ERROR(ENXIO);

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			/*
			 * MacOS X can extract the object type here such as:
			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
			 */
			type = ZFS_DIRENT_TYPE(zap.za_first_integer);

			if (check_sysattrs && !zap.za_normalization_conflict) {
#ifdef TODO
				zap.za_normalization_conflict =
				    xattr_sysattr_casechk(zap.za_name);
#else
				panic("%s:%u: TODO", __func__, __LINE__);
#endif
			}
		}

		if (flags & V_RDDIR_ACCFILTER) {
			/*
			 * If we have no access at all, don't include
			 * this entry in the returned information
			 */

			if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)

			if (!zfs_has_access(ezp, cr)) {

		if (flags & V_RDDIR_ENTFLAGS)
			reclen = EDIRENT_RECLEN(strlen(zap.za_name));
		else
			reclen = DIRENT64_RECLEN(strlen(zap.za_name));

		/*
		 * Will this entry fit in the buffer?
		 */
		if (outcount + reclen > bufsize) {
			/*
			 * Did we manage to fit anything in the buffer?
			 */

				error = SET_ERROR(EINVAL);

		if (flags & V_RDDIR_ENTFLAGS) {
			/*
			 * Add extended flag entry:
			 */
			eodp->ed_ino = objnum;
			eodp->ed_reclen = reclen;
			/* NOTE: ed_off is the offset for the *next* entry */
			next = &(eodp->ed_off);
			eodp->ed_eflags = zap.za_normalization_conflict ?
			    ED_CASE_CONFLICT : 0;
			(void) strncpy(eodp->ed_name, zap.za_name,
			    EDIRENT_NAMELEN(reclen));
			eodp = (edirent_t *)((intptr_t)eodp + reclen);
		} else {

			odp->d_ino = objnum;
			odp->d_reclen = reclen;
			odp->d_namlen = strlen(zap.za_name);
			(void) strlcpy(odp->d_name, zap.za_name,
			    odp->d_namlen + 1);

			odp = (dirent64_t *)((intptr_t)odp + reclen);
		}

		ASSERT(outcount <= bufsize);

		/* Prefetch znode */
		if (prefetch)
			dmu_prefetch(os, objnum, 0, 0);

		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		}

		if (cooks != NULL) {
			*cooks++ = offset;
			ncooks--;
			KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
		}
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

	/* Subtract unused cookies */
	if (ncookies != NULL)
		*ncookies -= ncooks;

	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
		iovp->iov_base += outcount;
		iovp->iov_len -= outcount;
		uio->uio_resid -= outcount;
	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
		/*
		 * Reset the pointer.
		 */
		offset = uio->uio_loffset;
	}

	zap_cursor_fini(&zc);
	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
		kmem_free(outbuf, bufsize);

	if (error == ENOENT)

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);

	uio->uio_loffset = offset;

	if (error != 0 && cookies != NULL) {
		free(*cookies, M_TEMP);
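
/*
 * Illustrative sketch (not part of the original file): the cursor
 * decoding used by zfs_readdir() above.  Because ZAP cookies always
 * have their low 4 bits clear, offsets 0-2 can never collide with a
 * real entry and are free for the synthetic '.', '..' and '.zfs'
 * entries.  The helper is hypothetical.
 */
#if 0
static const char *
readdir_special_name(uint64_t offset, boolean_t show_ctldir)
{
	switch (offset) {
	case 0:
		return (".");
	case 1:
		return ("..");
	case 2:
		return (show_ctldir ? ZFS_CTLDIR_NAME : NULL);
	default:
		return (NULL);	/* offset is a serialized ZAP cursor */
	}
}
#endif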
ulong_t zfs_fsync_sync_cnt = 4;

zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

	if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {

		zil_commit(zfsvfs->z_log, zp->z_id);
/*
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *	IN:	vp	- vnode of file.
 *		vap	- va_mask identifies requested attributes.
 *			  If AT_XVATTR set, then optional attrs are requested
 *		flags	- ATTR_NOACLCHECK (CIFS server context)
 *		cr	- credentials of caller.
 *		ct	- caller context
 *
 *	OUT:	vap	- attribute values.
 *
 *	RETURN:	0 (always succeeds).
 */
2727 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2728 caller_context_t *ct)
2730 znode_t *zp = VTOZ(vp);
2731 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2734 u_longlong_t nblocks;
2736 uint64_t mtime[2], ctime[2], crtime[2], rdev;
2737 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2738 xoptattr_t *xoap = NULL;
2739 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2740 sa_bulk_attr_t bulk[4];
2746 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2748 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
2749 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
2750 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
2751 if (vp->v_type == VBLK || vp->v_type == VCHR)
2752 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
2755 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2761 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2762 * Also, if we are the owner don't bother, since owner should
2763 * always be allowed to read basic attributes of file.
2765 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2766 (vap->va_uid != crgetuid(cr))) {
2767 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2775 * Return all attributes. It's cheaper to provide the answer
2776 * than to determine whether we were asked the question.
2779 mutex_enter(&zp->z_lock);
2780 vap->va_type = IFTOVT(zp->z_mode);
2781 vap->va_mode = zp->z_mode & ~S_IFMT;
2783 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
2785 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
2787 vap->va_nodeid = zp->z_id;
2788 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
2789 links = zp->z_links + 1;
2791 links = zp->z_links;
2792 vap->va_nlink = MIN(links, LINK_MAX); /* nlink_t limit! */
2793 vap->va_size = zp->z_size;
2795 vap->va_rdev = vp->v_rdev;
2797 if (vp->v_type == VBLK || vp->v_type == VCHR)
2798 vap->va_rdev = zfs_cmpldev(rdev);
2800 vap->va_seq = zp->z_seq;
2801 vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
2802 vap->va_filerev = zp->z_seq;
2805 * Add in any requested optional attributes and the create time.
2806 * Also set the corresponding bits in the returned attribute bitmap.
2808 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
2809 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2811 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2812 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2815 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2816 xoap->xoa_readonly =
2817 ((zp->z_pflags & ZFS_READONLY) != 0);
2818 XVA_SET_RTN(xvap, XAT_READONLY);
2821 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2823 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2824 XVA_SET_RTN(xvap, XAT_SYSTEM);
2827 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2829 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2830 XVA_SET_RTN(xvap, XAT_HIDDEN);
2833 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2834 xoap->xoa_nounlink =
2835 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2836 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2839 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2840 xoap->xoa_immutable =
2841 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2842 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2845 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2846 xoap->xoa_appendonly =
2847 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2848 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2851 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2853 ((zp->z_pflags & ZFS_NODUMP) != 0);
2854 XVA_SET_RTN(xvap, XAT_NODUMP);
2857 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2859 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2860 XVA_SET_RTN(xvap, XAT_OPAQUE);
2863 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2864 xoap->xoa_av_quarantined =
2865 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2866 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2869 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2870 xoap->xoa_av_modified =
2871 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2872 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2875 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2876 vp->v_type == VREG) {
2877 zfs_sa_get_scanstamp(zp, xvap);
2880 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2883 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
2884 times, sizeof (times));
2885 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2886 XVA_SET_RTN(xvap, XAT_CREATETIME);
2889 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2890 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2891 XVA_SET_RTN(xvap, XAT_REPARSE);
2893 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2894 xoap->xoa_generation = zp->z_gen;
2895 XVA_SET_RTN(xvap, XAT_GEN);
2898 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2900 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2901 XVA_SET_RTN(xvap, XAT_OFFLINE);
2904 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2906 ((zp->z_pflags & ZFS_SPARSE) != 0);
2907 XVA_SET_RTN(xvap, XAT_SPARSE);
2911 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2912 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2913 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2914 ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
2916 mutex_exit(&zp->z_lock);
2918 sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
2919 vap->va_blksize = blksize;
2920 vap->va_bytes = nblocks << 9; /* nblocks * 512 */
2922 if (zp->z_blksz == 0) {
2924 * Block size hasn't been set; suggest maximal I/O transfers.
2926 vap->va_blksize = zfsvfs->z_max_blksz;
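/*
 * For illustration, a minimal userland sketch (path and fallback size
 * hypothetical) of how the va_blksize hint above is typically consumed
 * once it is surfaced as st_blksize by stat(2):
 *
 *	#include <stddef.h>
 *	#include <sys/stat.h>
 *
 *	size_t
 *	pick_io_size(const char *path)
 *	{
 *		struct stat st;
 *
 *		if (stat(path, &st) != 0 || st.st_blksize == 0)
 *			return (8192);
 *		return (st.st_blksize);
 *	}
 *
 * Sizing buffered I/O to st_blksize lets an application write whole
 * blocks and avoid read-modify-write cycles, which is why an unset
 * block size suggests the maximal transfer size here.
 */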
2934 * Set the file attributes to the values contained in the vattr structure.
2937 * IN: vp - vnode of file to be modified.
2938 * vap - new attribute values.
2939 * If AT_XVATTR set, then optional attrs are being set
2940 * flags - ATTR_UTIME set if non-default time values provided.
2941 * - ATTR_NOACLCHECK (CIFS context only).
2942 * cr - credentials of caller.
2943 * ct - caller context
2945 * RETURN: 0 on success, error code on failure.
2948 * vp - ctime updated, mtime updated if size changed.
2952 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
2953 caller_context_t *ct)
2955 znode_t *zp = VTOZ(vp);
2956 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
2961 uint_t mask = vap->va_mask;
2962 uint_t saved_mask = 0;
2963 uint64_t saved_mode;
2966 uint64_t new_uid, new_gid;
2968 uint64_t mtime[2], ctime[2];
2970 int need_policy = FALSE;
2972 zfs_fuid_info_t *fuidp = NULL;
2973 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2976 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2977 boolean_t fuid_dirtied = B_FALSE;
2978 sa_bulk_attr_t bulk[7], xattr_bulk[7];
2979 int count = 0, xattr_count = 0;
2984 if (mask & AT_NOSET)
2985 return (SET_ERROR(EINVAL));
2990 zilog = zfsvfs->z_log;
2993 * Make sure that if ephemeral uid/gid or xvattr are specified,
2994 * the file system is at the proper version level.
2997 if (zfsvfs->z_use_fuids == B_FALSE &&
2998 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2999 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
3000 (mask & AT_XVATTR))) {
3002 return (SET_ERROR(EINVAL));
3005 if (mask & AT_SIZE && vp->v_type == VDIR) {
3007 return (SET_ERROR(EISDIR));
3010 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
3012 return (SET_ERROR(EINVAL));
3016 * If this is an xvattr_t, then get a pointer to the structure of
3017 * optional attributes. If this is NULL, then we have a vattr_t.
3019 xoap = xva_getxoptattr(xvap);
3021 xva_init(&tmpxvattr);
3024 * On immutable files, only the immutable bit and atime may be altered.
3026 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
3027 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
3028 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
3030 return (SET_ERROR(EPERM));
3033 if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
3035 return (SET_ERROR(EPERM));
3039 * Verify timestamps don't overflow 32 bits.
3040 * ZFS can handle large timestamps, but 32-bit syscalls can't
3041 * handle times greater than 2038. This check should be removed
3042 * once large timestamps are fully supported.
3044 if (mask & (AT_ATIME | AT_MTIME)) {
3045 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
3046 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
3048 return (SET_ERROR(EOVERFLOW));
3056 /* Can this be moved to before the top label? */
3057 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
3059 return (SET_ERROR(EROFS));
3063 * First validate permissions
3066 if (mask & AT_SIZE) {
3068 * XXX - Note, we are not providing any open
3069 * mode flags here (like FNDELAY), so we may
3070 * block if there are locks present... this
3071 * should be addressed in openat().
3073 /* XXX - would it be OK to generate a log record here? */
3074 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
3081 if (mask & (AT_ATIME|AT_MTIME) ||
3082 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
3083 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
3084 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
3085 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
3086 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
3087 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
3088 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
3089 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
3093 if (mask & (AT_UID|AT_GID)) {
3094 int idmask = (mask & (AT_UID|AT_GID));
3099 * NOTE: even if a new mode is being set,
3100 * we may clear S_ISUID/S_ISGID bits.
3103 if (!(mask & AT_MODE))
3104 vap->va_mode = zp->z_mode;
3107 * Take ownership, or chgrp to a group we are a member of.
3110 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
3111 take_group = (mask & AT_GID) &&
3112 zfs_groupmember(zfsvfs, vap->va_gid, cr);
3115 * If both AT_UID and AT_GID are set then take_owner and
3116 * take_group must both be set in order to allow taking ownership.
3119 * Otherwise, send the check through secpolicy_vnode_setattr()
3123 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
3124 ((idmask == AT_UID) && take_owner) ||
3125 ((idmask == AT_GID) && take_group)) {
3126 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
3127 skipaclchk, cr) == 0) {
3129 * Remove setuid/setgid for non-privileged users
3131 secpolicy_setid_clear(vap, vp, cr);
3132 trim_mask = (mask & (AT_UID|AT_GID));
3141 mutex_enter(&zp->z_lock);
3142 oldva.va_mode = zp->z_mode;
3143 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
3144 if (mask & AT_XVATTR) {
3146 * Update xvattr mask to include only those attributes
3147 * that are actually changing.
3149 * The bits will be restored prior to actually setting
3150 * the attributes so the caller thinks they were set.
3152 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
3153 if (xoap->xoa_appendonly !=
3154 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
3157 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
3158 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
3162 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
3163 if (xoap->xoa_nounlink !=
3164 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
3167 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
3168 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
3172 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
3173 if (xoap->xoa_immutable !=
3174 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
3177 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
3178 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
3182 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
3183 if (xoap->xoa_nodump !=
3184 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
3187 XVA_CLR_REQ(xvap, XAT_NODUMP);
3188 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
3192 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
3193 if (xoap->xoa_av_modified !=
3194 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
3197 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
3198 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
3202 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
3203 if ((vp->v_type != VREG &&
3204 xoap->xoa_av_quarantined) ||
3205 xoap->xoa_av_quarantined !=
3206 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
3209 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
3210 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
3214 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
3215 mutex_exit(&zp->z_lock);
3217 return (SET_ERROR(EPERM));
3220 if (need_policy == FALSE &&
3221 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
3222 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
3227 mutex_exit(&zp->z_lock);
3229 if (mask & AT_MODE) {
3230 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
3231 err = secpolicy_setid_setsticky_clear(vp, vap,
3237 trim_mask |= AT_MODE;
3245 * If trim_mask is set then take ownership
3246 * has been granted or write_acl is present and the user
3247 * has the ability to modify mode. In that case remove
3248 * UID|GID and/or MODE from mask so that
3249 * secpolicy_vnode_setattr() doesn't revoke it.
3253 saved_mask = vap->va_mask;
3254 vap->va_mask &= ~trim_mask;
3255 if (trim_mask & AT_MODE) {
3257 * Save the mode, as secpolicy_vnode_setattr()
3258 * will overwrite it with ova.va_mode.
3260 saved_mode = vap->va_mode;
3263 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
3264 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
3271 vap->va_mask |= saved_mask;
3272 if (trim_mask & AT_MODE) {
3274 * Recover the mode after
3275 * secpolicy_vnode_setattr().
3277 vap->va_mode = saved_mode;
3283 * secpolicy_vnode_setattr, or take ownership may have changed va_mask.
3286 mask = vap->va_mask;
3288 if ((mask & (AT_UID | AT_GID))) {
3289 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
3290 &xattr_obj, sizeof (xattr_obj));
3292 if (err == 0 && xattr_obj) {
3293 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
3297 if (mask & AT_UID) {
3298 new_uid = zfs_fuid_create(zfsvfs,
3299 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
3300 if (new_uid != zp->z_uid &&
3301 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
3303 VN_RELE(ZTOV(attrzp));
3304 err = SET_ERROR(EDQUOT);
3309 if (mask & AT_GID) {
3310 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
3311 cr, ZFS_GROUP, &fuidp);
3312 if (new_gid != zp->z_gid &&
3313 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
3315 VN_RELE(ZTOV(attrzp));
3316 err = SET_ERROR(EDQUOT);
3321 tx = dmu_tx_create(zfsvfs->z_os);
3323 if (mask & AT_MODE) {
3324 uint64_t pmode = zp->z_mode;
3326 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
3328 if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
3329 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
3330 err = SET_ERROR(EPERM);
3334 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode))
3337 mutex_enter(&zp->z_lock);
3338 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
3340 * Are we upgrading ACL from old V0 format to V1 format?
3343 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
3344 zfs_znode_acl_version(zp) ==
3345 ZFS_ACL_VERSION_INITIAL) {
3346 dmu_tx_hold_free(tx, acl_obj, 0,
3348 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3349 0, aclp->z_acl_bytes);
3351 dmu_tx_hold_write(tx, acl_obj, 0,
3354 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3355 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3356 0, aclp->z_acl_bytes);
3358 mutex_exit(&zp->z_lock);
3359 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3361 if ((mask & AT_XVATTR) &&
3362 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3363 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
3365 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3369 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
3372 fuid_dirtied = zfsvfs->z_fuid_dirty;
3374 zfs_fuid_txhold(zfsvfs, tx);
3376 zfs_sa_upgrade_txholds(tx, zp);
3378 err = dmu_tx_assign(tx, TXG_NOWAIT);
3380 if (err == ERESTART)
3387 * Set each attribute requested.
3388 * We group settings according to the locks they need to acquire.
3390 * Note: you cannot set ctime directly, although it will be
3391 * updated as a side-effect of calling this function.
3395 if (mask & (AT_UID|AT_GID|AT_MODE))
3396 mutex_enter(&zp->z_acl_lock);
3397 mutex_enter(&zp->z_lock);
3399 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3400 &zp->z_pflags, sizeof (zp->z_pflags));
3403 if (mask & (AT_UID|AT_GID|AT_MODE))
3404 mutex_enter(&attrzp->z_acl_lock);
3405 mutex_enter(&attrzp->z_lock);
3406 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3407 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
3408 sizeof (attrzp->z_pflags));
3411 if (mask & (AT_UID|AT_GID)) {
3413 if (mask & AT_UID) {
3414 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
3415 &new_uid, sizeof (new_uid));
3416 zp->z_uid = new_uid;
3418 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3419 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
3421 attrzp->z_uid = new_uid;
3425 if (mask & AT_GID) {
3426 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
3427 NULL, &new_gid, sizeof (new_gid));
3428 zp->z_gid = new_gid;
3430 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3431 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
3433 attrzp->z_gid = new_gid;
3436 if (!(mask & AT_MODE)) {
3437 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
3438 NULL, &new_mode, sizeof (new_mode));
3439 new_mode = zp->z_mode;
3441 err = zfs_acl_chown_setattr(zp);
3444 err = zfs_acl_chown_setattr(attrzp);
3449 if (mask & AT_MODE) {
3450 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
3451 &new_mode, sizeof (new_mode));
3452 zp->z_mode = new_mode;
3453 ASSERT3U((uintptr_t)aclp, !=, 0);
3454 err = zfs_aclset_common(zp, aclp, cr, tx);
3456 if (zp->z_acl_cached)
3457 zfs_acl_free(zp->z_acl_cached);
3458 zp->z_acl_cached = aclp;
3463 if (mask & AT_ATIME) {
3464 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
3465 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
3466 &zp->z_atime, sizeof (zp->z_atime));
3469 if (mask & AT_MTIME) {
3470 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
3471 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3472 mtime, sizeof (mtime));
3475 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
3476 if (mask & AT_SIZE && !(mask & AT_MTIME)) {
3477 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
3478 NULL, mtime, sizeof (mtime));
3479 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3480 &ctime, sizeof (ctime));
3481 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3483 } else if (mask != 0) {
3484 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3485 &ctime, sizeof (ctime));
3486 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
3489 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
3490 SA_ZPL_CTIME(zfsvfs), NULL,
3491 &ctime, sizeof (ctime));
3492 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
3493 mtime, ctime, B_TRUE);
3497 * Do this after setting timestamps to prevent the timestamp
3498 * update from toggling the bit.
3501 if (xoap && (mask & AT_XVATTR)) {
3504 * Restore the trimmed-off masks
3505 * so that return masks can be set for the caller.
3508 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
3509 XVA_SET_REQ(xvap, XAT_APPENDONLY);
3511 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
3512 XVA_SET_REQ(xvap, XAT_NOUNLINK);
3514 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
3515 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
3517 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
3518 XVA_SET_REQ(xvap, XAT_NODUMP);
3520 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
3521 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
3523 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
3524 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
3527 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
3528 ASSERT(vp->v_type == VREG);
3530 zfs_xvattr_set(zp, xvap, tx);
3534 zfs_fuid_sync(zfsvfs, tx);
3537 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
3539 mutex_exit(&zp->z_lock);
3540 if (mask & (AT_UID|AT_GID|AT_MODE))
3541 mutex_exit(&zp->z_acl_lock);
3544 if (mask & (AT_UID|AT_GID|AT_MODE))
3545 mutex_exit(&attrzp->z_acl_lock);
3546 mutex_exit(&attrzp->z_lock);
3549 if (err == 0 && attrzp) {
3550 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
3556 VN_RELE(ZTOV(attrzp));
3562 zfs_fuid_info_free(fuidp);
3568 if (err == ERESTART)
3571 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
3576 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3577 zil_commit(zilog, 0);
3583 typedef struct zfs_zlock {
3584 krwlock_t *zl_rwlock; /* lock we acquired */
3585 znode_t *zl_znode; /* znode we held */
3586 struct zfs_zlock *zl_next; /* next in list */
3590 * Drop locks and release vnodes that were held by zfs_rename_lock().
3593 zfs_rename_unlock(zfs_zlock_t **zlpp)
3597 while ((zl = *zlpp) != NULL) {
3598 if (zl->zl_znode != NULL)
3599 VN_RELE(ZTOV(zl->zl_znode));
3600 rw_exit(zl->zl_rwlock);
3601 *zlpp = zl->zl_next;
3602 kmem_free(zl, sizeof (*zl));
3607 * Search back through the directory tree, using the ".." entries.
3608 * Lock each directory in the chain to prevent concurrent renames.
3609 * Fail any attempt to move a directory into one of its own descendants.
3610 * XXX - z_parent_lock can overlap with map or grow locks
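/*
 * For example (hypothetical tree), an attempted rename of /usr/a/b to
 * /usr/a/b/c/d starts at the target directory c and follows "..":
 * c's parent is b, which is szp itself, so the walk below returns
 * EINVAL before ever reaching the root, and the cycle is rejected.
 */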
3613 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
3617 uint64_t rootid = zp->z_zfsvfs->z_root;
3618 uint64_t oidp = zp->z_id;
3619 krwlock_t *rwlp = &szp->z_parent_lock;
3620 krw_t rw = RW_WRITER;
3623 * First pass write-locks szp and compares to zp->z_id.
3624 * Later passes read-lock zp and compare to zp->z_parent.
3627 if (!rw_tryenter(rwlp, rw)) {
3629 * Another thread is renaming in this path.
3630 * Note that if we are a WRITER, we don't have any
3631 * parent_locks held yet.
3633 if (rw == RW_READER && zp->z_id > szp->z_id) {
3635 * Drop our locks and restart
3637 zfs_rename_unlock(&zl);
3641 rwlp = &szp->z_parent_lock;
3646 * Wait for other thread to drop its locks
3652 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3653 zl->zl_rwlock = rwlp;
3654 zl->zl_znode = NULL;
3655 zl->zl_next = *zlpp;
3658 if (oidp == szp->z_id) /* We're a descendant of szp */
3659 return (SET_ERROR(EINVAL));
3661 if (oidp == rootid) /* We've hit the top */
3664 if (rw == RW_READER) { /* i.e. not the first pass */
3665 int error = zfs_zget(zp->z_zfsvfs, oidp, &zp);
3670 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs),
3671 &oidp, sizeof (oidp));
3672 rwlp = &zp->z_parent_lock;
3675 } while (zp->z_id != sdzp->z_id);
3681 * Move an entry from the provided source directory to the target
3682 * directory. Change the entry name as indicated.
3684 * IN: sdvp - Source directory containing the "old entry".
3685 * snm - Old entry name.
3686 * tdvp - Target directory to contain the "new entry".
3687 * tnm - New entry name.
3688 * cr - credentials of caller.
3689 * ct - caller context
3690 * flags - case flags
3692 * RETURN: 0 on success, error code on failure.
3695 * sdvp,tdvp - ctime|mtime updated
3699 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
3700 caller_context_t *ct, int flags)
3702 znode_t *tdzp, *szp, *tzp;
3703 znode_t *sdzp = VTOZ(sdvp);
3704 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs;
3707 zfs_dirlock_t *sdl, *tdl;
3710 int cmp, serr, terr;
3715 ZFS_VERIFY_ZP(sdzp);
3716 zilog = zfsvfs->z_log;
3719 * Make sure we have the real vp for the target directory.
3721 if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3724 if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
3726 return (SET_ERROR(EXDEV));
3730 ZFS_VERIFY_ZP(tdzp);
3731 if (zfsvfs->z_utf8 && u8_validate(tnm,
3732 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3734 return (SET_ERROR(EILSEQ));
3737 if (flags & FIGNORECASE)
3746 * This is to prevent the creation of links into attribute space
3747 * by renaming a linked file into/out of an attribute directory.
3748 * See the comment in zfs_link() for why this is considered bad.
3750 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3752 return (SET_ERROR(EINVAL));
3756 * Lock source and target directory entries. To prevent deadlock,
3757 * a lock ordering must be defined. We lock the directory with
3758 * the smallest object id first, or if it's a tie, the one with
3759 * the lexically first name.
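/*
 * For example (hypothetical object ids), two concurrent renames in
 * opposite directions between directories A (id 7) and B (id 12)
 * both lock A's entry first because 7 < 12; neither thread can hold
 * B's lock while waiting on A's, so the classic AB/BA deadlock
 * cannot form.
 */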
3761 if (sdzp->z_id < tdzp->z_id) {
3763 } else if (sdzp->z_id > tdzp->z_id) {
3767 * First compare the two name arguments without
3768 * considering any case folding.
3770 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
3772 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3773 ASSERT(error == 0 || !zfsvfs->z_utf8);
3776 * POSIX: "If the old argument and the new argument
3777 * both refer to links to the same existing file,
3778 * the rename() function shall return successfully
3779 * and perform no other action."
3785 * If the file system is case-folding, then we may
3786 * have some more checking to do. A case-folding file
3787 * system either supports mixed case sensitivity
3788 * access or is completely case-insensitive. Note
3789 * that the file system is always case preserving.
3791 * In mixed sensitivity mode case sensitive behavior
3792 * is the default. FIGNORECASE must be used to
3793 * explicitly request case insensitive behavior.
3795 * If the source and target names provided differ only
3796 * by case (e.g., a request to rename 'tim' to 'Tim'),
3797 * we will treat this as a special case in the
3798 * case-insensitive mode: as long as the source name
3799 * is an exact match, we will allow this to proceed as
3800 * a name-change request.
3802 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
3803 (zfsvfs->z_case == ZFS_CASE_MIXED &&
3804 flags & FIGNORECASE)) &&
3805 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
3808 * case preserving rename request, require exact name matches.
3817 * If the source and destination directories are the same, we should
3818 * grab the z_name_lock of that directory only once.
3822 rw_enter(&sdzp->z_name_lock, RW_READER);
3826 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3827 ZEXISTS | zflg, NULL, NULL);
3828 terr = zfs_dirent_lock(&tdl,
3829 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3831 terr = zfs_dirent_lock(&tdl,
3832 tdzp, tnm, &tzp, zflg, NULL, NULL);
3833 serr = zfs_dirent_lock(&sdl,
3834 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3840 * Source entry invalid or not there.
3843 zfs_dirent_unlock(tdl);
3849 rw_exit(&sdzp->z_name_lock);
3852 * FreeBSD: In OpenSolaris they only check if rename source is
3853 * ".." here, because "." is handled in their lookup. This is
3854 * not the case for FreeBSD, so we check for "." explicitly.
3856 if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0)
3857 serr = SET_ERROR(EINVAL);
3862 zfs_dirent_unlock(sdl);
3866 rw_exit(&sdzp->z_name_lock);
3868 if (strcmp(tnm, "..") == 0)
3869 terr = SET_ERROR(EINVAL);
3875 * Must have write access at the source to remove the old entry
3876 * and write access at the target to create the new entry.
3877 * Note that if target and source are the same, this can be
3878 * done in a single check.
3881 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
3884 if (ZTOV(szp)->v_type == VDIR) {
3886 * Check to make sure rename is valid.
3887 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3889 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
3894 * Does target exist?
3898 * Source and target must be the same type.
3900 if (ZTOV(szp)->v_type == VDIR) {
3901 if (ZTOV(tzp)->v_type != VDIR) {
3902 error = SET_ERROR(ENOTDIR);
3906 if (ZTOV(tzp)->v_type == VDIR) {
3907 error = SET_ERROR(EISDIR);
3912 * POSIX dictates that when the source and target
3913 * entries refer to the same file object, rename
3914 * must do nothing and exit without error.
3916 if (szp->z_id == tzp->z_id) {
3922 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct);
3924 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct);
3927 * notify the target directory if it is not the same
3928 * as source directory.
3931 vnevent_rename_dest_dir(tdvp, ct);
3934 tx = dmu_tx_create(zfsvfs->z_os);
3935 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3936 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3937 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3938 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3940 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3941 zfs_sa_upgrade_txholds(tx, tdzp);
3944 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3945 zfs_sa_upgrade_txholds(tx, tzp);
3948 zfs_sa_upgrade_txholds(tx, szp);
3949 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3950 error = dmu_tx_assign(tx, TXG_NOWAIT);
3953 zfs_rename_unlock(&zl);
3954 zfs_dirent_unlock(sdl);
3955 zfs_dirent_unlock(tdl);
3958 rw_exit(&sdzp->z_name_lock);
3963 if (error == ERESTART) {
3973 if (tzp) /* Attempt to remove the existing target */
3974 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3977 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3979 szp->z_pflags |= ZFS_AV_MODIFIED;
3981 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3982 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3985 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3987 zfs_log_rename(zilog, tx, TX_RENAME |
3988 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3989 sdl->dl_name, tdzp, tdl->dl_name, szp);
3992 * Update path information for the target vnode
3994 vn_renamepath(tdvp, ZTOV(szp), tnm,
3998 * At this point, we have successfully created
3999 * the target name, but have failed to remove
4000 * the source name. Since the create was done
4001 * with the ZRENAMING flag, there are
4002 * complications; for one, the link count is
4003 * wrong. The easiest way to deal with this
4004 * is to remove the newly created target, and
4005 * return the original error. This must
4006 * succeed; fortunately, it is very unlikely to
4007 * fail, since we just created it.
4009 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
4010 ZRENAMING, NULL), ==, 0);
4013 #ifdef FREEBSD_NAMECACHE
4024 zfs_rename_unlock(&zl);
4026 zfs_dirent_unlock(sdl);
4027 zfs_dirent_unlock(tdl);
4030 rw_exit(&sdzp->z_name_lock);
4037 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4038 zil_commit(zilog, 0);
4046 * Insert the indicated symbolic reference entry into the directory.
4048 * IN: dvp - Directory to contain new symbolic link.
4049 * link - Name for new symlink entry.
4050 * vap - Attributes of new entry.
4051 * cr - credentials of caller.
4052 * ct - caller context
4053 * flags - case flags
4055 * RETURN: 0 on success, error code on failure.
4058 * dvp - ctime|mtime updated
4062 zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
4063 cred_t *cr, kthread_t *td)
4065 znode_t *zp, *dzp = VTOZ(dvp);
4068 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4070 uint64_t len = strlen(link);
4073 zfs_acl_ids_t acl_ids;
4074 boolean_t fuid_dirtied;
4075 uint64_t txtype = TX_SYMLINK;
4078 ASSERT(vap->va_type == VLNK);
4082 zilog = zfsvfs->z_log;
4084 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
4085 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4087 return (SET_ERROR(EILSEQ));
4089 if (flags & FIGNORECASE)
4092 if (len > MAXPATHLEN) {
4094 return (SET_ERROR(ENAMETOOLONG));
4097 if ((error = zfs_acl_ids_create(dzp, 0,
4098 vap, cr, NULL, &acl_ids)) != 0) {
4104 * Attempt to lock directory; fail if entry already exists.
4106 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
4108 zfs_acl_ids_free(&acl_ids);
4113 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4114 zfs_acl_ids_free(&acl_ids);
4115 zfs_dirent_unlock(dl);
4120 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
4121 zfs_acl_ids_free(&acl_ids);
4122 zfs_dirent_unlock(dl);
4124 return (SET_ERROR(EDQUOT));
4126 tx = dmu_tx_create(zfsvfs->z_os);
4127 fuid_dirtied = zfsvfs->z_fuid_dirty;
4128 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
4129 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4130 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
4131 ZFS_SA_BASE_ATTR_SIZE + len);
4132 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
4133 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
4134 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
4135 acl_ids.z_aclp->z_acl_bytes);
4138 zfs_fuid_txhold(zfsvfs, tx);
4139 error = dmu_tx_assign(tx, TXG_NOWAIT);
4141 zfs_dirent_unlock(dl);
4142 if (error == ERESTART) {
4147 zfs_acl_ids_free(&acl_ids);
4154 * Create a new object for the symlink.
4155 * For version 4 ZPL datasets the symlink will be an SA attribute.
4157 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
4160 zfs_fuid_sync(zfsvfs, tx);
4162 mutex_enter(&zp->z_lock);
4164 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
4167 zfs_sa_symlink(zp, link, len, tx);
4168 mutex_exit(&zp->z_lock);
4171 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
4172 &zp->z_size, sizeof (zp->z_size), tx);
4174 * Insert the new object into the directory.
4176 (void) zfs_link_create(dl, zp, tx, ZNEW);
4178 if (flags & FIGNORECASE)
4180 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
4183 zfs_acl_ids_free(&acl_ids);
4187 zfs_dirent_unlock(dl);
4189 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4190 zil_commit(zilog, 0);
4197 * Return, in the buffer contained in the provided uio structure,
4198 * the symbolic path referred to by vp.
4200 * IN: vp - vnode of symbolic link.
4201 * uio - structure to contain the link path.
4202 * cr - credentials of caller.
4203 * ct - caller context
4205 * OUT: uio - structure containing the link path.
4207 * RETURN: 0 on success, error code on failure.
4210 * vp - atime updated
4214 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct)
4216 znode_t *zp = VTOZ(vp);
4217 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4223 mutex_enter(&zp->z_lock);
4225 error = sa_lookup_uio(zp->z_sa_hdl,
4226 SA_ZPL_SYMLINK(zfsvfs), uio);
4228 error = zfs_sa_readlink(zp, uio);
4229 mutex_exit(&zp->z_lock);
4231 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4238 * Insert a new entry into directory tdvp referencing svp.
4240 * IN: tdvp - Directory to contain new entry.
4241 * svp - vnode of new entry.
4242 * name - name of new entry.
4243 * cr - credentials of caller.
4244 * ct - caller context
4246 * RETURN: 0 on success, error code on failure.
4249 * tdvp - ctime|mtime updated
4250 * svp - ctime updated
4254 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
4255 caller_context_t *ct, int flags)
4257 znode_t *dzp = VTOZ(tdvp);
4259 zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
4269 ASSERT(tdvp->v_type == VDIR);
4273 zilog = zfsvfs->z_log;
4275 if (VOP_REALVP(svp, &realvp, ct) == 0)
4279 * POSIX dictates that we return EPERM here.
4280 * Better choices include ENOTSUP or EISDIR.
4282 if (svp->v_type == VDIR) {
4284 return (SET_ERROR(EPERM));
4287 if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
4289 return (SET_ERROR(EXDEV));
4295 /* Prevent links to .zfs/shares files */
4297 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
4298 &parent, sizeof (uint64_t))) != 0) {
4302 if (parent == zfsvfs->z_shares_dir) {
4304 return (SET_ERROR(EPERM));
4307 if (zfsvfs->z_utf8 && u8_validate(name,
4308 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
4310 return (SET_ERROR(EILSEQ));
4312 if (flags & FIGNORECASE)
4316 * We do not support links between attributes and non-attributes
4317 * because of the potential security risk of creating links
4318 * into "normal" file space in order to circumvent restrictions
4319 * imposed in attribute space.
4321 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
4323 return (SET_ERROR(EINVAL));
4327 owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
4328 if (owner != crgetuid(cr) && secpolicy_basic_link(svp, cr) != 0) {
4330 return (SET_ERROR(EPERM));
4333 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
4340 * Attempt to lock directory; fail if entry already exists.
4342 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
4348 tx = dmu_tx_create(zfsvfs->z_os);
4349 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
4350 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
4351 zfs_sa_upgrade_txholds(tx, szp);
4352 zfs_sa_upgrade_txholds(tx, dzp);
4353 error = dmu_tx_assign(tx, TXG_NOWAIT);
4355 zfs_dirent_unlock(dl);
4356 if (error == ERESTART) {
4366 error = zfs_link_create(dl, szp, tx, 0);
4369 uint64_t txtype = TX_LINK;
4370 if (flags & FIGNORECASE)
4372 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
4377 zfs_dirent_unlock(dl);
4380 vnevent_link(svp, ct);
4383 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4384 zil_commit(zilog, 0);
4392 * zfs_null_putapage() is used when the file system has been force
4393 * unmounted. It just drops the pages.
4397 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4398 size_t *lenp, int flags, cred_t *cr)
4400 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
4405 * Push a page out to disk, klustering if possible.
4407 * IN: vp - file to push page to.
4408 * pp - page to push.
4409 * flags - additional flags.
4410 * cr - credentials of caller.
4412 * OUT: offp - start of range pushed.
4413 * lenp - len of range pushed.
4415 * RETURN: 0 on success, error code on failure.
4417 * NOTE: callers must have locked the page to be pushed. On
4418 * exit, the page (and all other pages in the kluster) must be
4423 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
4424 size_t *lenp, int flags, cred_t *cr)
4426 znode_t *zp = VTOZ(vp);
4427 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4429 u_offset_t off, koff;
4436 * If our blocksize is bigger than the page size, try to kluster
4437 * multiple pages so that we write a full block (thus avoiding
4438 * a read-modify-write).
4440 if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
4441 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
4442 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
4443 ASSERT(koff <= zp->z_size);
4444 if (koff + klen > zp->z_size)
4445 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
4446 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
4448 ASSERT3U(btop(len), ==, btopr(len));
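/*
 * Worked example with hypothetical sizes: for z_blksz = 128K and
 * PAGESIZE = 8K, klen = P2ROUNDUP(128K, 8K) = 128K, and a dirty page
 * at off = 200K gives koff = P2ALIGN(200K, 128K) = 128K, so the
 * kluster covers [128K, 256K) -- one full block written without a
 * read-modify-write.  Near EOF the kluster is clipped to
 * P2ROUNDUP(z_size - koff, PAGESIZE).
 */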
4451 * Can't push pages past end-of-file.
4453 if (off >= zp->z_size) {
4454 /* ignore all pages */
4457 } else if (off + len > zp->z_size) {
4458 int npages = btopr(zp->z_size - off);
4461 page_list_break(&pp, &trunc, npages);
4462 /* ignore pages past end of file */
4464 pvn_write_done(trunc, flags);
4465 len = zp->z_size - off;
4468 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
4469 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
4470 err = SET_ERROR(EDQUOT);
4474 tx = dmu_tx_create(zfsvfs->z_os);
4475 dmu_tx_hold_write(tx, zp->z_id, off, len);
4477 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4478 zfs_sa_upgrade_txholds(tx, zp);
4479 err = dmu_tx_assign(tx, TXG_NOWAIT);
4481 if (err == ERESTART) {
4490 if (zp->z_blksz <= PAGESIZE) {
4491 caddr_t va = zfs_map_page(pp, S_READ);
4492 ASSERT3U(len, <=, PAGESIZE);
4493 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
4494 zfs_unmap_page(pp, va);
4496 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
4500 uint64_t mtime[2], ctime[2];
4501 sa_bulk_attr_t bulk[3];
4504 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
4506 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
4508 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
4510 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
4512 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
4517 pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
4527 * Copy the indicated portion of the file from the cached pages into the file.
4528 * The pages are stored in a page list attached to the file's vnode.
4530 * IN: vp - vnode of file to push page data to.
4531 * off - position in file to put data.
4532 * len - amount of data to write.
4533 * flags - flags to control the operation.
4534 * cr - credentials of caller.
4535 * ct - caller context.
4537 * RETURN: 0 on success, error code on failure.
4540 * vp - ctime|mtime updated
4544 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4545 caller_context_t *ct)
4547 znode_t *zp = VTOZ(vp);
4548 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4560 * Align this request to the file block size in case we kluster.
4561 * XXX - this can result in pretty aggressive locking, which can
4562 * impact simultaneous read/write access. One option might be
4563 * to break up long requests (len == 0) into block-by-block
4564 * operations to get narrower locking.
4566 blksz = zp->z_blksz;
4568 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
4571 if (len > 0 && ISP2(blksz))
4572 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
4578 * Search the entire vp list for pages >= io_off.
4580 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
4581 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
4584 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
4586 if (off > zp->z_size) {
4587 /* past end of file */
4588 zfs_range_unlock(rl);
4593 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
4595 for (off = io_off; io_off < off + len; io_off += io_len) {
4596 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
4597 pp = page_lookup(vp, io_off,
4598 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
4600 pp = page_lookup_nowait(vp, io_off,
4601 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4604 if (pp != NULL && pvn_getdirty(pp, flags)) {
4608 * Found a dirty page to push
4610 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
4618 zfs_range_unlock(rl);
4619 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
4620 zil_commit(zfsvfs->z_log, zp->z_id);
4628 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
4630 znode_t *zp = VTOZ(vp);
4631 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4634 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
4635 if (zp->z_sa_hdl == NULL) {
4637 * The fs has been unmounted, or we did a
4638 * suspend/resume and this file no longer exists.
4640 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4641 vrecycle(vp, curthread);
4645 mutex_enter(&zp->z_lock);
4646 if (zp->z_unlinked) {
4648 * Fast path to recycle a vnode of a removed file.
4650 mutex_exit(&zp->z_lock);
4651 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4652 vrecycle(vp, curthread);
4655 mutex_exit(&zp->z_lock);
4657 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
4658 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
4660 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
4661 zfs_sa_upgrade_txholds(tx, zp);
4662 error = dmu_tx_assign(tx, TXG_WAIT);
4666 mutex_enter(&zp->z_lock);
4667 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
4668 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
4669 zp->z_atime_dirty = 0;
4670 mutex_exit(&zp->z_lock);
4674 rw_exit(&zfsvfs->z_teardown_inactive_lock);
4679 * Bounds-check the seek operation.
4681 * IN: vp - vnode seeking within
4682 * ooff - old file offset
4683 * noffp - pointer to new file offset
4684 * ct - caller context
4686 * RETURN: 0 on success, EINVAL if new offset invalid.
4690 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp,
4691 caller_context_t *ct)
4693 if (vp->v_type == VDIR)
4695 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4699 * Pre-filter the generic locking function to trap attempts to place
4700 * a mandatory lock on a memory-mapped file.
4703 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4704 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct)
4706 znode_t *zp = VTOZ(vp);
4707 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4713 * We are following the UFS semantics with respect to mapcnt
4714 * here: If we see that the file is mapped already, then we will
4715 * return an error, but we don't worry about races between this
4716 * function and zfs_map().
4718 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4720 return (SET_ERROR(EAGAIN));
4723 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4727 * If we can't find a page in the cache, we will create a new page
4728 * and fill it with file data. For efficiency, we may try to fill
4729 * multiple pages at once (klustering) to fill up the supplied page
4730 * list. Note that the pages to be filled are held with an exclusive
4731 * lock to prevent access by other threads while they are being filled.
4734 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4735 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4737 znode_t *zp = VTOZ(vp);
4738 page_t *pp, *cur_pp;
4739 objset_t *os = zp->z_zfsvfs->z_os;
4740 u_offset_t io_off, total;
4744 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4746 * We only have a single page, don't bother klustering
4750 pp = page_create_va(vp, io_off, io_len,
4751 PG_EXCL | PG_WAIT, seg, addr);
4754 * Try to find enough pages to fill the page list
4756 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4757 &io_len, off, plsz, 0);
4761 * The page already exists, nothing to do here.
4768 * Fill the pages in the kluster.
4771 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4774 ASSERT3U(io_off, ==, cur_pp->p_offset);
4775 va = zfs_map_page(cur_pp, S_WRITE);
4776 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4778 zfs_unmap_page(cur_pp, va);
4780 /* On error, toss the entire kluster */
4781 pvn_read_done(pp, B_ERROR);
4782 /* convert checksum errors into IO errors */
4784 err = SET_ERROR(EIO);
4787 cur_pp = cur_pp->p_next;
4791 * Fill in the page list array from the kluster starting
4792 * from the desired offset `off'.
4793 * NOTE: the page list will always be null terminated.
4795 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4796 ASSERT(pl == NULL || (*pl)->p_offset == off);
4802 * Return pointers to the pages for the file region [off, off + len]
4803 * in the pl array. If plsz is greater than len, this function may
4804 * also return page pointers from after the specified region
4805 * (i.e. the region [off, off + plsz]). These additional pages are
4806 * only returned if they are already in the cache, or were created as
4807 * part of a klustered read.
4809 * IN: vp - vnode of file to get data from.
4810 * off - position in file to get data from.
4811 * len - amount of data to retrieve.
4812 * plsz - length of provided page list.
4813 * seg - segment to obtain pages for.
4814 * addr - virtual address of fault.
4815 * rw - mode of created pages.
4816 * cr - credentials of caller.
4817 * ct - caller context.
4819 * OUT: protp - protection mode of created pages.
4820 * pl - list of pages created.
4822 * RETURN: 0 on success, error code on failure.
4825 * vp - atime updated
4829 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4830 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4831 enum seg_rw rw, cred_t *cr, caller_context_t *ct)
4833 znode_t *zp = VTOZ(vp);
4834 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4838 /* we do our own caching, faultahead is unnecessary */
4841 else if (len > plsz)
4844 len = P2ROUNDUP(len, PAGESIZE);
4845 ASSERT(plsz >= len);
4854 * Loop through the requested range [off, off + len) looking
4855 * for pages. If we don't find a page, we will need to create
4856 * a new page and fill it with data from the file.
4859 if (*pl = page_lookup(vp, off, SE_SHARED))
4861 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw))
4864 ASSERT3U((*pl)->p_offset, ==, off);
4868 ASSERT3U(len, >=, PAGESIZE);
4871 ASSERT3U(plsz, >=, PAGESIZE);
4878 * Fill out the page array with any pages already in the cache.
4881 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4888 * Release any pages we have previously locked.
4893 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4903 * Request a memory map for a section of a file. This code interacts
4904 * with common code and the VM system as follows:
4906 * - common code calls mmap(), which ends up in smmap_common()
4907 * - this calls VOP_MAP(), which takes you into (say) zfs
4908 * - zfs_map() calls as_map(), passing segvn_create() as the callback
4909 * - segvn_create() creates the new segment and calls VOP_ADDMAP()
4910 * - zfs_addmap() updates z_mapcnt
4914 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4915 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4916 caller_context_t *ct)
4918 znode_t *zp = VTOZ(vp);
4919 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4920 segvn_crargs_t vn_a;
4926 if ((prot & PROT_WRITE) && (zp->z_pflags &
4927 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4929 return (SET_ERROR(EPERM));
4932 if ((prot & (PROT_READ | PROT_EXEC)) &&
4933 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4935 return (SET_ERROR(EACCES));
4938 if (vp->v_flag & VNOMAP) {
4940 return (SET_ERROR(ENOSYS));
4943 if (off < 0 || len > MAXOFFSET_T - off) {
4945 return (SET_ERROR(ENXIO));
4948 if (vp->v_type != VREG) {
4950 return (SET_ERROR(ENODEV));
4954 * If the file is locked, disallow mapping.
4956 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4958 return (SET_ERROR(EAGAIN));
4962 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4970 vn_a.offset = (u_offset_t)off;
4971 vn_a.type = flags & MAP_TYPE;
4973 vn_a.maxprot = maxprot;
4976 vn_a.flags = flags & ~MAP_TYPE;
4978 vn_a.lgrp_mem_policy_flags = 0;
4980 error = as_map(as, *addrp, len, segvn_create, &vn_a);
4989 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4990 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4991 caller_context_t *ct)
4993 uint64_t pages = btopr(len);
4995 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
5000 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
5001 * more accurate mtime for the associated file. Since we don't have a way of
5002 * detecting when the data was actually modified, we have to resort to
5003 * heuristics. If an explicit msync() is done, then we mark the mtime when the
5004 * last page is pushed. The problem occurs when the msync() call is omitted,
5005 * which is by far the most common case:
5013 * putpage() via fsflush
5015 * If we wait until fsflush to come along, we can have a modification time that
5016 * is some arbitrary point in the future. In order to prevent this in the
5017 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is removed.
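/*
 * A minimal userland sketch of the omitted-msync() case above (file
 * name and length hypothetical); the munmap() is what triggers the
 * asynchronous page push in zfs_delmap() below:
 *
 *	fd = open("/tank/f", O_RDWR);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;			<- dirty a page, no msync()
 *	munmap(p, len);			<- pages flushed here
 *	close(fd);
 */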
5022 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
5023 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr,
5024 caller_context_t *ct)
5026 uint64_t pages = btopr(len);
5028 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
5029 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
5031 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
5032 vn_has_cached_data(vp))
5033 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
5039 * Free or allocate space in a file. Currently, this function only
5040 * supports the `F_FREESP' command. However, this command is somewhat
5041 * misnamed, as its functionality includes the ability to allocate as
5042 * well as free space.
5044 * IN: vp - vnode of file to free data in.
5045 * cmd - action to take (only F_FREESP supported).
5046 * bfp - section of file to free/alloc.
5047 * flag - current file open mode flags.
5048 * offset - current file offset.
5049 * cr - credentials of caller [UNUSED].
5050 * ct - caller context.
5052 * RETURN: 0 on success, error code on failure.
5055 * vp - ctime|mtime updated
5059 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
5060 offset_t offset, cred_t *cr, caller_context_t *ct)
5062 znode_t *zp = VTOZ(vp);
5063 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5070 if (cmd != F_FREESP) {
5072 return (SET_ERROR(EINVAL));
5075 if (error = convoff(vp, bfp, 0, offset)) {
5080 if (bfp->l_len < 0) {
5082 return (SET_ERROR(EINVAL));
5086 len = bfp->l_len; /* 0 means from off to end of file */
5088 error = zfs_freesp(zp, off, len, flag, TRUE);
5095 CTASSERT(sizeof (struct zfid_short) <= sizeof (struct fid));
5096 CTASSERT(sizeof (struct zfid_long) <= sizeof (struct fid));
5100 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
5102 znode_t *zp = VTOZ(vp);
5103 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5106 uint64_t object = zp->z_id;
5113 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
5114 &gen64, sizeof (uint64_t))) != 0) {
5119 gen = (uint32_t)gen64;
5121 size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
5124 if (fidp->fid_len < size) {
5125 fidp->fid_len = size;
5127 return (SET_ERROR(ENOSPC));
5130 fidp->fid_len = size;
5133 zfid = (zfid_short_t *)fidp;
5135 zfid->zf_len = size;
5137 for (i = 0; i < sizeof (zfid->zf_object); i++)
5138 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
5140 /* Must have a non-zero generation number to distinguish from .zfs */
5143 for (i = 0; i < sizeof (zfid->zf_gen); i++)
5144 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
5146 if (size == LONG_FID_LEN) {
5147 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
5150 zlfid = (zfid_long_t *)fidp;
5152 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
5153 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
5155 /* XXX - this should be the generation number for the objset */
5156 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
5157 zlfid->zf_setgen[i] = 0;
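/*
 * The loops above store each field least-significant byte first, so a
 * consumer reconstructs it the same way.  For example (hypothetical
 * value), object 0x1234 is stored as
 * zf_object[] = { 0x34, 0x12, 0, 0, 0, 0 } and decoded by:
 *
 *	uint64_t object = 0;
 *	for (i = 0; i < sizeof (zfid->zf_object); i++)
 *		object |= (uint64_t)zfid->zf_object[i] << (8 * i);
 */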
5165 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
5166 caller_context_t *ct)
5178 case _PC_FILESIZEBITS:
5182 case _PC_XATTR_EXISTS:
5184 zfsvfs = zp->z_zfsvfs;
5188 error = zfs_dirent_lock(&dl, zp, "", &xzp,
5189 ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
5191 zfs_dirent_unlock(dl);
5192 if (!zfs_dirempty(xzp))
5195 } else if (error == ENOENT) {
5197 * If there aren't extended attributes, it's the
5198 * same as having zero of them.
5205 case _PC_SATTR_ENABLED:
5206 case _PC_SATTR_EXISTS:
5207 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
5208 (vp->v_type == VREG || vp->v_type == VDIR);
5211 case _PC_ACCESS_FILTERING:
5212 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) &&
5216 case _PC_ACL_ENABLED:
5217 *valp = _ACL_ACE_ENABLED;
5220 case _PC_MIN_HOLE_SIZE:
5221 *valp = (int)SPA_MINBLOCKSIZE;
5224 case _PC_TIMESTAMP_RESOLUTION:
5225 /* nanosecond timestamp resolution */
5229 case _PC_ACL_EXTENDED:
5237 case _PC_ACL_PATH_MAX:
5238 *valp = ACL_MAX_ENTRIES;
5242 return (EOPNOTSUPP);
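/*
 * For illustration, a userland caller reaches this switch through
 * pathconf(2) (path hypothetical):
 *
 *	long hole = pathconf("/tank/f", _PC_MIN_HOLE_SIZE);
 *
 * which yields SPA_MINBLOCKSIZE on success, and -1 for names that
 * fall through to EOPNOTSUPP above.
 */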
5248 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5249 caller_context_t *ct)
5251 znode_t *zp = VTOZ(vp);
5252 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5254 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5258 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
5266 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
5267 caller_context_t *ct)
5269 znode_t *zp = VTOZ(vp);
5270 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5272 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
5273 zilog_t *zilog = zfsvfs->z_log;
5278 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
5280 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
5281 zil_commit(zilog, 0);
5290 * The smallest read for which we may consider loaning out an arcbuf.
5290 * This must be a power of 2.
5292 int zcr_blksz_min = (1 << 10); /* 1K */
5294 * If set to less than the file block size, allow loaning out of an
5295 * arcbuf for a partial block read. This must be a power of 2.
5297 int zcr_blksz_max = (1 << 17); /* 128K */
5301 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
5302 caller_context_t *ct)
5304 znode_t *zp = VTOZ(vp);
5305 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
5306 int max_blksz = zfsvfs->z_max_blksz;
5307 uio_t *uio = &xuio->xu_uio;
5308 ssize_t size = uio->uio_resid;
5309 offset_t offset = uio->uio_loffset;
5314 int preamble, postamble;
5316 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
5317 return (SET_ERROR(EINVAL));
5324 * Loan out an arc_buf for write if write size is bigger than
5325 * max_blksz, and the file's block size is also max_blksz.
5328 if (size < blksz || zp->z_blksz != blksz) {
5330 return (SET_ERROR(EINVAL));
5333 * Caller requests buffers for write before knowing where the
5334 * write offset might be (e.g. NFS TCP write).
5339 preamble = P2PHASE(offset, blksz);
5341 preamble = blksz - preamble;
5346 postamble = P2PHASE(size, blksz);
5349 fullblk = size / blksz;
5350 (void) dmu_xuio_init(xuio,
5351 (preamble != 0) + fullblk + (postamble != 0));
5352 DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble,
5353 int, postamble, int,
5354 (preamble != 0) + fullblk + (postamble != 0));
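/*
 * Worked example with hypothetical numbers: blksz = 128K,
 * offset = 100K, size = 300K.  P2PHASE(100K, 128K) = 100K, so the
 * preamble buffer covers the trailing 28K of the first block; the
 * 272K that remain net of the preamble yield fullblk = 2 full
 * buffers plus a 16K postamble, for (1 + 2 + 1) = 4 loaned arc_bufs,
 * matching the count passed to dmu_xuio_init() above.
 */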
5357 * Have to fix iov base/len for partial buffers. They
5358 * currently represent full arc_buf's.
5361 /* data begins in the middle of the arc_buf */
5362 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5365 (void) dmu_xuio_add(xuio, abuf,
5366 blksz - preamble, preamble);
5369 for (i = 0; i < fullblk; i++) {
5370 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5373 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
5377 /* data ends in the middle of the arc_buf */
5378 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
5381 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
5386 * Loan out an arc_buf for read if the read size is larger than
5387 * the current file block size. Block alignment is not
5388 * considered. A partial arc_buf will be loaned out for the read.
5390 blksz = zp->z_blksz;
5391 if (blksz < zcr_blksz_min)
5392 blksz = zcr_blksz_min;
5393 if (blksz > zcr_blksz_max)
5394 blksz = zcr_blksz_max;
5395 /* avoid potential complexity of dealing with it */
5396 if (blksz > max_blksz) {
5398 return (SET_ERROR(EINVAL));
5401 maxsize = zp->z_size - uio->uio_loffset;
5405 if (size < blksz || vn_has_cached_data(vp)) {
5407 return (SET_ERROR(EINVAL));
5412 return (SET_ERROR(EINVAL));
5415 uio->uio_extflg = UIO_XUIO;
5416 XUIO_XUZC_RW(xuio) = ioflag;
5423 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct)
5427 int ioflag = XUIO_XUZC_RW(xuio);
5429 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
5431 i = dmu_xuio_cnt(xuio);
5433 abuf = dmu_xuio_arcbuf(xuio, i);
5435 * if abuf == NULL, it must be a write buffer
5436 * that has been returned in zfs_write().
5439 dmu_return_arcbuf(abuf);
5440 ASSERT(abuf || ioflag == UIO_WRITE);
5443 dmu_xuio_fini(xuio);
5448 * Predeclare these here so that the compiler assumes that
5449 * this is an "old style" function declaration that does
5450 * not include arguments, so we won't get type mismatch errors
5451 * in the initializations that follow.
5453 static int zfs_inval();
5454 static int zfs_isdir();
5459 return (SET_ERROR(EINVAL));
5465 return (SET_ERROR(EISDIR));
5468 * Directory vnode operations template
5470 vnodeops_t *zfs_dvnodeops;
5471 const fs_operation_def_t zfs_dvnodeops_template[] = {
5472 VOPNAME_OPEN, { .vop_open = zfs_open },
5473 VOPNAME_CLOSE, { .vop_close = zfs_close },
5474 VOPNAME_READ, { .error = zfs_isdir },
5475 VOPNAME_WRITE, { .error = zfs_isdir },
5476 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5477 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5478 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5479 VOPNAME_ACCESS, { .vop_access = zfs_access },
5480 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5481 VOPNAME_CREATE, { .vop_create = zfs_create },
5482 VOPNAME_REMOVE, { .vop_remove = zfs_remove },
5483 VOPNAME_LINK, { .vop_link = zfs_link },
5484 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5485 VOPNAME_MKDIR, { .vop_mkdir = zfs_mkdir },
5486 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir },
5487 VOPNAME_READDIR, { .vop_readdir = zfs_readdir },
5488 VOPNAME_SYMLINK, { .vop_symlink = zfs_symlink },
5489 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5490 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5491 VOPNAME_FID, { .vop_fid = zfs_fid },
5492 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5493 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5494 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5495 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5496 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5501 * Regular file vnode operations template
5503 vnodeops_t *zfs_fvnodeops;
5504 const fs_operation_def_t zfs_fvnodeops_template[] = {
5505 VOPNAME_OPEN, { .vop_open = zfs_open },
5506 VOPNAME_CLOSE, { .vop_close = zfs_close },
5507 VOPNAME_READ, { .vop_read = zfs_read },
5508 VOPNAME_WRITE, { .vop_write = zfs_write },
5509 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5510 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5511 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5512 VOPNAME_ACCESS, { .vop_access = zfs_access },
5513 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5514 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5515 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5516 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5517 VOPNAME_FID, { .vop_fid = zfs_fid },
5518 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5519 VOPNAME_FRLOCK, { .vop_frlock = zfs_frlock },
5520 VOPNAME_SPACE, { .vop_space = zfs_space },
5521 VOPNAME_GETPAGE, { .vop_getpage = zfs_getpage },
5522 VOPNAME_PUTPAGE, { .vop_putpage = zfs_putpage },
5523 VOPNAME_MAP, { .vop_map = zfs_map },
5524 VOPNAME_ADDMAP, { .vop_addmap = zfs_addmap },
5525 VOPNAME_DELMAP, { .vop_delmap = zfs_delmap },
5526 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5527 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5528 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5529 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5530 VOPNAME_REQZCBUF, { .vop_reqzcbuf = zfs_reqzcbuf },
5531 VOPNAME_RETZCBUF, { .vop_retzcbuf = zfs_retzcbuf },
/*
 * Symbolic link vnode operations template
 */
vnodeops_t *zfs_symvnodeops;
const fs_operation_def_t zfs_symvnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * special share hidden files vnode operations template
 */
vnodeops_t *zfs_sharevnodeops;
const fs_operation_def_t zfs_sharevnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Extended attribute directory vnode operations template
 *
 * This template is identical to the directory vnode
 * operations template except for restricted operations:
 *	VOP_MKDIR()
 *	VOP_SYMLINK()
 *
 * Note that there are other restrictions embedded in:
 *	zfs_create() - restrict type to VREG
 *	zfs_link()   - no links into/out of attribute space
 *	zfs_rename() - no moves into/out of attribute space
 */
vnodeops_t *zfs_xdvnodeops;
const fs_operation_def_t zfs_xdvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .error = zfs_inval },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .error = zfs_inval },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Error vnode operations template
 */
vnodeops_t *zfs_evnodeops;
const fs_operation_def_t zfs_evnodeops_template[] = {
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	NULL,			NULL
};

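/*
 * Convert FreeBSD VOP_READ/VOP_WRITE ioflag bits (IO_*) into the
 * fcntl-style F* flags that the Solaris-derived zfs_read() and
 * zfs_write() entry points expect.
 */
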
static int
ioflags(int ioflags)
{
	int flags = 0;

	if (ioflags & IO_APPEND)
		flags |= FAPPEND;
	if (ioflags & IO_NDELAY)
		flags |= FNONBLOCK;
	if (ioflags & IO_SYNC)
		flags |= (FSYNC | FDSYNC | FRSYNC);

	return (flags);
}

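/*
 * VM pager "getpages" backend: fill the requested VM pages with file data
 * read through the DMU.  Pages outside the window actually read are freed,
 * and a short final page at EOF is zero-filled.
 */
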
static int
zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zp->z_zfsvfs->z_os;
	vm_page_t mfirst, mlast, mreq;
	vm_object_t object;
	caddr_t va;
	struct sf_buf *sf;
	off_t startoff, endoff;
	int i, error;
	vm_pindex_t reqstart, reqend;
	int pcount, lsize, reqsize, size;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	pcount = OFF_TO_IDX(round_page(count));
	mreq = m[reqpage];
	object = mreq->object;
	error = 0;

	KASSERT(vp->v_object == object, ("mismatching object"));

	if (pcount > 1 && zp->z_blksz > PAGESIZE) {
		startoff = rounddown(IDX_TO_OFF(mreq->pindex), zp->z_blksz);
		reqstart = OFF_TO_IDX(round_page(startoff));
		if (reqstart < m[0]->pindex)
			reqstart = 0;
		else
			reqstart = reqstart - m[0]->pindex;
		endoff = roundup(IDX_TO_OFF(mreq->pindex) + PAGE_SIZE,
		    zp->z_blksz);
		reqend = OFF_TO_IDX(trunc_page(endoff)) - 1;
		if (reqend > m[pcount - 1]->pindex)
			reqend = m[pcount - 1]->pindex;
		reqsize = reqend - m[reqstart]->pindex + 1;
		KASSERT(reqstart <= reqpage && reqpage < reqstart + reqsize,
		    ("reqpage beyond [reqstart, reqstart + reqsize[ bounds"));
	} else {
		reqstart = reqpage;
		reqsize = 1;
	}
	mfirst = m[reqstart];
	mlast = m[reqstart + reqsize - 1];

	VM_OBJECT_LOCK(object);

	for (i = 0; i < reqstart; i++) {
		vm_page_lock(m[i]);
		vm_page_free(m[i]);
		vm_page_unlock(m[i]);
	}
	for (i = reqstart + reqsize; i < pcount; i++) {
		vm_page_lock(m[i]);
		vm_page_free(m[i]);
		vm_page_unlock(m[i]);
	}

	if (mreq->valid && reqsize == 1) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		VM_OBJECT_UNLOCK(object);
		ZFS_EXIT(zfsvfs);
		return (VM_PAGER_OK);
	}

	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, reqsize);

	if (IDX_TO_OFF(mreq->pindex) >= object->un_pager.vnp.vnp_size) {
		for (i = reqstart; i < reqstart + reqsize; i++) {
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		ZFS_EXIT(zfsvfs);
		return (VM_PAGER_BAD);
	}

	lsize = PAGE_SIZE;
	if (IDX_TO_OFF(mlast->pindex) + lsize > object->un_pager.vnp.vnp_size)
		lsize = object->un_pager.vnp.vnp_size - IDX_TO_OFF(mlast->pindex);

	VM_OBJECT_UNLOCK(object);

	for (i = reqstart; i < reqstart + reqsize; i++) {
		size = PAGE_SIZE;
		if (i == (reqstart + reqsize - 1))
			size = lsize;
		va = zfs_map_page(m[i], &sf);
		error = dmu_read(os, zp->z_id, IDX_TO_OFF(m[i]->pindex),
		    size, va, DMU_READ_PREFETCH);
		if (size != PAGE_SIZE)
			bzero(va + size, PAGE_SIZE - size);
		zfs_unmap_page(sf);
		if (error != 0)
			break;
	}

	VM_OBJECT_LOCK(object);

	for (i = reqstart; i < reqstart + reqsize; i++) {
		if (error == 0)
			m[i]->valid = VM_PAGE_BITS_ALL;
		KASSERT(m[i]->dirty == 0,
		    ("zfs_getpages: page %p is dirty", m[i]));
		if (i != reqpage)
			vm_page_readahead_finish(m[i]);
	}

	VM_OBJECT_UNLOCK(object);

	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
	ZFS_EXIT(zfsvfs);
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

static int
zfs_freebsd_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_reqpage));
}

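/*
 * ZFS does its own block mapping and caching in the DMU/ARC, so bmap
 * below just reports an identity mapping and advertises no read-ahead
 * or read-behind runs to the clustering code.
 */
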
static int
zfs_freebsd_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;

	return (0);
}

static int
zfs_freebsd_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t	*vp = ap->a_vp;
	znode_t *zp = VTOZ(vp);
	int error;

	error = zfs_open(&vp, ap->a_mode, ap->a_cred, NULL);
	if (error == 0)
		vnode_create_vobject(vp, zp->z_size, ap->a_td);
	return (error);
}

static int
zfs_freebsd_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred, NULL));
}

static int
zfs_freebsd_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
	    ap->a_fflag, ap->a_cred, NULL, NULL));
}

static int
zfs_freebsd_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{

	return (zfs_read(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
	    ap->a_cred, NULL));
}

static int
zfs_freebsd_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{

	return (zfs_write(ap->a_vp, ap->a_uio, ioflags(ap->a_ioflag),
	    ap->a_cred, NULL));
}

static int
zfs_freebsd_access(ap)
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	znode_t *zp = VTOZ(vp);
	accmode_t accmode;
	int error = 0;

	/*
	 * ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
	 */
	accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
	if (accmode != 0)
		error = zfs_access(ap->a_vp, accmode, 0, ap->a_cred, NULL);

	/*
	 * VADMIN has to be handled by vaccess().
	 */
	if (error == 0) {
		accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
		if (accmode != 0) {
			error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
			    zp->z_gid, accmode, ap->a_cred, NULL);
		}
	}

	/*
	 * For VEXEC, ensure that at least one execute bit is set for
	 * non-directories.
	 */
	if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
	    (zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
	}

	return (error);
}

static int
zfs_freebsd_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	char nm[NAME_MAX + 1];

	ASSERT(cnp->cn_namelen < sizeof(nm));
	strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof(nm)));

	return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
	    cnp->cn_cred, cnp->cn_thread, 0));
}

static int
zfs_freebsd_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	vattr_t *vap = ap->a_vap;
	int mode;

	ASSERT(cnp->cn_flags & SAVENAME);

	vattr_init_mask(vap);
	mode = vap->va_mode & ALLPERMS;

	return (zfs_create(ap->a_dvp, cnp->cn_nameptr, vap, !EXCL, mode,
	    ap->a_vpp, cnp->cn_cred, cnp->cn_thread));
}

static int
zfs_freebsd_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{

	ASSERT(ap->a_cnp->cn_flags & SAVENAME);

	return (zfs_remove(ap->a_dvp, ap->a_cnp->cn_nameptr,
	    ap->a_cnp->cn_cred, NULL, 0));
}

static int
zfs_freebsd_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	vattr_t *vap = ap->a_vap;

	ASSERT(ap->a_cnp->cn_flags & SAVENAME);

	vattr_init_mask(vap);

	return (zfs_mkdir(ap->a_dvp, ap->a_cnp->cn_nameptr, vap, ap->a_vpp,
	    ap->a_cnp->cn_cred, NULL, 0, NULL));
}

static int
zfs_freebsd_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;

	ASSERT(cnp->cn_flags & SAVENAME);

	return (zfs_rmdir(ap->a_dvp, cnp->cn_nameptr, NULL, cnp->cn_cred,
	    NULL, 0));
}

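/*
 * The eofflag/ncookies/cookies arguments below exist for the benefit of
 * the NFS server; when cookies are requested, zfs_readdir() allocates
 * one directory-offset cookie per entry returned.
 */
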
static int
zfs_freebsd_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */ *ap;
{

	return (zfs_readdir(ap->a_vp, ap->a_uio, ap->a_cred, ap->a_eofflag,
	    ap->a_ncookies, ap->a_cookies));
}

static int
zfs_freebsd_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (zfs_fsync(ap->a_vp, 0, ap->a_td->td_ucred, NULL));
}

static int
zfs_freebsd_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vattr_t *vap = ap->a_vap;
	xvattr_t xvap;
	u_long fflags = 0;
	int error;

	xva_init(&xvap);
	xvap.xva_vattr = *vap;
	xvap.xva_vattr.va_mask |= AT_XVATTR;

	/* Request the ZFS flags needed to compute BSD chflags. */
	/* XXX: what about SF_SETTABLE? */
	XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
	XVA_SET_REQ(&xvap, XAT_APPENDONLY);
	XVA_SET_REQ(&xvap, XAT_NOUNLINK);
	XVA_SET_REQ(&xvap, XAT_NODUMP);
	error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred, NULL);
	if (error != 0)
		return (error);

	/* Convert ZFS xattr into chflags. */
#define	FLAG_CHECK(fflag, xflag, xfield)	do {			\
	if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0)		\
		fflags |= (fflag);					\
} while (0)
	FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
	    xvap.xva_xoptattrs.xoa_immutable);
	FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
	    xvap.xva_xoptattrs.xoa_appendonly);
	FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
	    xvap.xva_xoptattrs.xoa_nounlink);
	FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
	    xvap.xva_xoptattrs.xoa_nodump);
#undef	FLAG_CHECK
	*vap = xvap.xva_vattr;
	vap->va_flags = fflags;
	return (0);
}

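/*
 * Example of the FLAG_CHECK mapping above: a file whose ZFS xoa_immutable
 * attribute is set reports SF_IMMUTABLE in va_flags, which chflags(1) and
 * "ls -lo" display as "schg".
 */
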
static int
zfs_freebsd_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;
	vattr_t *vap = ap->a_vap;
	cred_t *cred = ap->a_cred;
	xvattr_t xvap;
	u_long fflags;
	uint64_t zflags;

	vattr_init_mask(vap);
	vap->va_mask &= ~AT_NOSET;

	xva_init(&xvap);
	xvap.xva_vattr = *vap;

	zflags = VTOZ(vp)->z_pflags;

	if (vap->va_flags != VNOVAL) {
		zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
		int error;

		if (zfsvfs->z_use_fuids == B_FALSE)
			return (EOPNOTSUPP);

		fflags = vap->va_flags;
		if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_NODUMP)) != 0)
			return (EOPNOTSUPP);
		/*
		 * Unprivileged processes are not permitted to unset system
		 * flags, or modify flags if any system flags are set.
		 * Privileged non-jail processes may not modify system flags
		 * if securelevel > 0 and any existing system flags are set.
		 * Privileged jail processes behave like privileged non-jail
		 * processes if the security.jail.chflags_allowed sysctl is
		 * non-zero; otherwise, they behave like unprivileged
		 * processes.
		 */
		if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
		    priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0) == 0) {
			if (zflags &
			    (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
				error = securelevel_gt(cred, 0);
				if (error != 0)
					return (error);
			}
		} else {
			/*
			 * Callers may only modify the file flags on objects
			 * they have VADMIN rights for.
			 */
			if ((error = VOP_ACCESS(vp, VADMIN, cred, curthread)) != 0)
				return (error);
			if (zflags &
			    (ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
				return (EPERM);
			}
			if (fflags &
			    (SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
				return (EPERM);
			}
		}

#define	FLAG_CHANGE(fflag, zflag, xflag, xfield)	do {		\
	if (((fflags & (fflag)) && !(zflags & (zflag))) ||		\
	    ((zflags & (zflag)) && !(fflags & (fflag)))) {		\
		XVA_SET_REQ(&xvap, (xflag));				\
		(xfield) = ((fflags & (fflag)) != 0);			\
	}								\
} while (0)
		/* Convert chflags into ZFS-type flags. */
		/* XXX: what about SF_SETTABLE? */
		FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
		    xvap.xva_xoptattrs.xoa_immutable);
		FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
		    xvap.xva_xoptattrs.xoa_appendonly);
		FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
		    xvap.xva_xoptattrs.xoa_nounlink);
		FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
		    xvap.xva_xoptattrs.xoa_nodump);
#undef	FLAG_CHANGE
	}
	return (zfs_setattr(vp, (vattr_t *)&xvap, 0, cred, NULL));
}

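/*
 * Example of the FLAG_CHANGE mapping above: "chflags schg file" arrives
 * with SF_IMMUTABLE set in va_flags while ZFS_IMMUTABLE is clear in
 * z_pflags, so XAT_IMMUTABLE is requested with xoa_immutable = 1 and
 * zfs_setattr() persists the new flag.
 */
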
static int
zfs_freebsd_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	vnode_t *fdvp = ap->a_fdvp;
	vnode_t *fvp = ap->a_fvp;
	vnode_t *tdvp = ap->a_tdvp;
	vnode_t *tvp = ap->a_tvp;
	int error;

	ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
	ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));

	error = zfs_rename(fdvp, ap->a_fcnp->cn_nameptr, tdvp,
	    ap->a_tcnp->cn_nameptr, ap->a_fcnp->cn_cred, NULL, 0);

	vrele(fdvp);
	vrele(fvp);
	vrele(tdvp);
	if (tvp != NULL)
		vrele(tvp);

	return (error);
}

static int
zfs_freebsd_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	vattr_t *vap = ap->a_vap;

	ASSERT(cnp->cn_flags & SAVENAME);

	vap->va_type = VLNK;	/* FreeBSD: Syscall only sets va_mode. */
	vattr_init_mask(vap);

	return (zfs_symlink(ap->a_dvp, ap->a_vpp, cnp->cn_nameptr, vap,
	    ap->a_target, cnp->cn_cred, cnp->cn_thread));
}

static int
zfs_freebsd_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{

	return (zfs_readlink(ap->a_vp, ap->a_uio, ap->a_cred, NULL));
}

static int
zfs_freebsd_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;

	ASSERT(cnp->cn_flags & SAVENAME);

	return (zfs_link(ap->a_tdvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred,
	    NULL, 0));
}

static int
zfs_freebsd_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t *vp = ap->a_vp;

	zfs_inactive(vp, ap->a_td->td_ucred, NULL);
	return (0);
}

static int
zfs_freebsd_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{
	vnode_t	*vp = ap->a_vp;
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ASSERT(zp != NULL);

	/* Destroy the vm object and flush associated pages. */
	vnode_destroy_vobject(vp);

	/*
	 * z_teardown_inactive_lock protects from a race with
	 * zfs_znode_dmu_fini in zfsvfs_teardown during
	 * force unmount.
	 */
	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	if (zp->z_sa_hdl == NULL)
		zfs_znode_free(zp);
	else
		zfs_zinactive(zp);
	rw_exit(&zfsvfs->z_teardown_inactive_lock);

	vp->v_data = NULL;
	return (0);
}

static int
zfs_freebsd_fid(ap)
	struct vop_fid_args /* {
		struct vnode *a_vp;
		struct fid *a_fid;
	} */ *ap;
{

	return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
}

static int
zfs_freebsd_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap;
{
	ulong_t val;
	int error;

	error = zfs_pathconf(ap->a_vp, ap->a_name, &val, curthread->td_ucred,
	    NULL);
	if (error == 0)
		*ap->a_retval = val;
	else if (error == EOPNOTSUPP)
		error = vop_stdpathconf(ap);
	return (error);
}

static int
zfs_freebsd_fifo_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_ACL_PATH_MAX:
	case _PC_MAC_PRESENT:
		return (zfs_freebsd_pathconf(ap));
	default:
		return (fifo_specops.vop_pathconf(ap));
	}
}

/*
 * FreeBSD's extended attributes namespace defines a file name prefix for
 * each ZFS extended attribute name:
 *
 * NAMESPACE	PREFIX
 * system	freebsd:system:
 * user		(none, can be used to access ZFS fsattr(5) attributes
 *		created on Solaris)
 */

static int
zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
    size_t size)
{
	const char *namespace, *prefix, *suffix;

	/* We don't allow '/' character in attribute name. */
	if (strchr(name, '/') != NULL)
		return (EINVAL);
	/* We don't allow attribute names that start with "freebsd:" string. */
	if (strncmp(name, "freebsd:", 8) == 0)
		return (EINVAL);

	bzero(attrname, size);

	switch (attrnamespace) {
	case EXTATTR_NAMESPACE_USER:
#if 0
		prefix = "freebsd:";
		namespace = EXTATTR_NAMESPACE_USER_STRING;
		suffix = ":";
#else
		/*
		 * This is the default namespace by which we can access all
		 * attributes created on Solaris.
		 */
		prefix = namespace = suffix = "";
#endif
		break;
	case EXTATTR_NAMESPACE_SYSTEM:
		prefix = "freebsd:";
		namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
		suffix = ":";
		break;
	case EXTATTR_NAMESPACE_EMPTY:
	default:
		return (EINVAL);
	}
	if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
	    name) >= size) {
		return (ENAMETOOLONG);
	}
	return (0);
}

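/*
 * Examples of the name construction above:
 *
 *	EXTATTR_NAMESPACE_USER,   "myattr" -> "myattr"
 *	EXTATTR_NAMESPACE_SYSTEM, "md5"    -> "freebsd:system:md5"
 *
 * The bare user-namespace form is what makes fsattr(5) attributes
 * created on Solaris visible from FreeBSD.
 */
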
/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
zfs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	struct vattr va;
	vnode_t *xvp = NULL, *vp;
	int error, flags;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	flags = FREAD;
	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, attrname,
	    xvp, td);
	error = vn_open_cred(&nd, &flags, 0, 0, ap->a_cred, NULL);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		if (error == ENOENT)
			error = ENOATTR;
		return (error);
	}

	if (ap->a_size != NULL) {
		error = VOP_GETATTR(vp, &va, ap->a_cred);
		if (error == 0)
			*ap->a_size = (size_t)va.va_size;
	} else if (ap->a_uio != NULL)
		error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);

	VOP_UNLOCK(vp, 0);
	vn_close(vp, flags, ap->a_cred, td);
	ZFS_EXIT(zfsvfs);

	return (error);
}

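/*
 * ZFS stores each extended attribute as an ordinary file in a hidden
 * per-object attribute directory, so "get" above is an open of that file
 * followed by a VOP_READ, and the "delete"/"set" operations below are
 * likewise a plain file remove and a plain file write.
 */
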
/*
 * Vnode operation to remove a named attribute.
 */
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	vnode_t *xvp = NULL, *vp;
	int error;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF | MPSAFE,
	    UIO_SYSSPACE, attrname, xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		if (error == ENOENT)
			error = ENOATTR;
		return (error);
	}

	error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	vput(nd.ni_dvp);
	if (vp == nd.ni_dvp)
		vrele(vp);
	else
		vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
zfs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrname[255];
	struct vattr va;
	vnode_t *xvp = NULL, *vp;
	int error, flags;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
	    sizeof(attrname));
	if (error != 0)
		return (error);

	ZFS_ENTER(zfsvfs);

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR | CREATE_XATTR_DIR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	flags = FFLAGS(O_WRONLY | O_CREAT);
	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, attrname,
	    xvp, td);
	error = vn_open_cred(&nd, &flags, 0600, 0, ap->a_cred, NULL);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	VATTR_NULL(&va);
	va.va_size = 0;
	error = VOP_SETATTR(vp, &va, ap->a_cred);
	if (error == 0)
		VOP_WRITE(vp, ap->a_uio, IO_UNIT | IO_SYNC, ap->a_cred);

	VOP_UNLOCK(vp, 0);
	vn_close(vp, flags, ap->a_cred, td);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
zfs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	zfsvfs_t *zfsvfs = VTOZ(ap->a_vp)->z_zfsvfs;
	struct thread *td = ap->a_td;
	struct nameidata nd;
	char attrprefix[16];
	u_char dirbuf[sizeof(struct dirent)];
	struct dirent *dp;
	struct iovec aiov;
	struct uio auio, *uio = ap->a_uio;
	size_t *sizep = ap->a_size;
	size_t plen;
	vnode_t *xvp = NULL, *vp;
	int done, error, eof, pos;

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error != 0)
		return (error);

	error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
	    sizeof(attrprefix));
	if (error != 0)
		return (error);
	plen = strlen(attrprefix);

	ZFS_ENTER(zfsvfs);

	if (sizep != NULL)
		*sizep = 0;

	error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
	    LOOKUP_XATTR);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		/*
		 * ENOATTR means that the EA directory does not yet exist,
		 * i.e. there are no extended attributes there.
		 */
		if (error == ENOATTR)
			error = 0;
		return (error);
	}

	NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED | MPSAFE,
	    UIO_SYSSPACE, ".", xvp, td);
	error = namei(&nd);
	vp = nd.ni_vp;
	NDFREE(&nd, NDF_ONLY_PNBUF);
	if (error != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;

	do {
		u_char nlen;

		aiov.iov_base = (void *)dirbuf;
		aiov.iov_len = sizeof(dirbuf);
		auio.uio_resid = sizeof(dirbuf);
		error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
		done = sizeof(dirbuf) - auio.uio_resid;
		if (error != 0)
			break;
		for (pos = 0; pos < done;) {
			dp = (struct dirent *)(dirbuf + pos);
			pos += dp->d_reclen;
			/*
			 * XXX: Temporarily we also accept DT_UNKNOWN, as this
			 * is what we get when attribute was created on Solaris.
			 */
			if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
				continue;
			if (plen == 0 && strncmp(dp->d_name, "freebsd:", 8) == 0)
				continue;
			else if (strncmp(dp->d_name, attrprefix, plen) != 0)
				continue;
			nlen = dp->d_namlen - plen;
			if (sizep != NULL)
				*sizep += 1 + nlen;
			else if (uio != NULL) {
				/*
				 * Format of extattr name entry is one byte for
				 * length and the rest for name.
				 */
				error = uiomove(&nlen, 1, uio->uio_rw, uio);
				if (error == 0) {
					error = uiomove(dp->d_name + plen, nlen,
					    uio->uio_rw, uio);
				}
				if (error != 0)
					break;
			}
		}
	} while (!eof && error == 0);

	vput(vp);
	ZFS_EXIT(zfsvfs);

	return (error);
}

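/*
 * Example of the list format above: attributes "foo" and "ab" in the
 * requested namespace are returned as the byte sequence
 * { 3, 'f', 'o', 'o', 2, 'a', 'b' } -- a one-byte length followed by
 * the name with the namespace prefix stripped.
 */
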
static int
zfs_freebsd_getacl(ap)
	struct vop_getacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int error;
	vsecattr_t vsecattr;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
	if ((error = zfs_getsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred,
	    NULL)) != 0)
		return (error);

	error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
	    vsecattr.vsa_aclcnt);
	if (vsecattr.vsa_aclentp != NULL)
		kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);

	return (error);
}

static int
zfs_freebsd_setacl(ap)
	struct vop_setacl_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{
	int error;
	vsecattr_t vsecattr;
	int aclbsize;	/* size of acl list in bytes */
	aclent_t *aaclp;

	if (ap->a_type != ACL_TYPE_NFS4)
		return (EINVAL);

	if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
		return (EINVAL);

	/*
	 * With NFSv4 ACLs, chmod(2) may need to add additional entries,
	 * splitting every entry into two and appending "canonical six"
	 * entries at the end.  Don't allow for setting an ACL that would
	 * cause chmod(2) to run out of ACL entries.
	 */
	if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
		return (ENOSPC);

	error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
	if (error != 0)
		return (error);

	vsecattr.vsa_mask = VSA_ACE;
	aclbsize = ap->a_aclp->acl_cnt * sizeof(ace_t);
	vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
	aaclp = vsecattr.vsa_aclentp;
	vsecattr.vsa_aclentsz = aclbsize;

	aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
	error = zfs_setsecattr(ap->a_vp, &vsecattr, 0, ap->a_cred, NULL);
	kmem_free(aaclp, aclbsize);

	return (error);
}

static int
zfs_freebsd_aclcheck(ap)
	struct vop_aclcheck_args /* {
		struct vnode *vp;
		acl_type_t type;
		struct acl *aclp;
		struct ucred *cred;
		struct thread *td;
	} */ *ap;
{

	return (EOPNOTSUPP);
}

struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;

struct vop_vector zfs_vnodeops = {
	.vop_default =		&default_vnodeops,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_access =		zfs_freebsd_access,
#ifdef FREEBSD_NAMECACHE
	.vop_lookup =		vfs_cache_lookup,
	.vop_cachedlookup =	zfs_freebsd_lookup,
#else
	.vop_lookup =		zfs_freebsd_lookup,
#endif
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_create =		zfs_freebsd_create,
	.vop_mknod =		zfs_freebsd_create,
	.vop_mkdir =		zfs_freebsd_mkdir,
	.vop_readdir =		zfs_freebsd_readdir,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_open =		zfs_freebsd_open,
	.vop_close =		zfs_freebsd_close,
	.vop_rmdir =		zfs_freebsd_rmdir,
	.vop_ioctl =		zfs_freebsd_ioctl,
	.vop_link =		zfs_freebsd_link,
	.vop_symlink =		zfs_freebsd_symlink,
	.vop_readlink =		zfs_freebsd_readlink,
	.vop_read =		zfs_freebsd_read,
	.vop_write =		zfs_freebsd_write,
	.vop_remove =		zfs_freebsd_remove,
	.vop_rename =		zfs_freebsd_rename,
	.vop_pathconf =		zfs_freebsd_pathconf,
	.vop_bmap =		zfs_freebsd_bmap,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getextattr =	zfs_getextattr,
	.vop_deleteextattr =	zfs_deleteextattr,
	.vop_setextattr =	zfs_setextattr,
	.vop_listextattr =	zfs_listextattr,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
	.vop_getpages =		zfs_freebsd_getpages,
};

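/*
 * Data I/O on a fifo is diverted to the fifo implementation at open time
 * and should never reach the vnode layer, so the vector below wires
 * VOP_READ/VOP_WRITE to VOP_PANIC to fail loudly if they are ever called.
 */
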
struct vop_vector zfs_fifoops = {
	.vop_default =		&fifo_specops,
	.vop_fsync =		zfs_freebsd_fsync,
	.vop_access =		zfs_freebsd_access,
	.vop_getattr =		zfs_freebsd_getattr,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_read =		VOP_PANIC,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_setattr =		zfs_freebsd_setattr,
	.vop_write =		VOP_PANIC,
	.vop_pathconf =		zfs_freebsd_fifo_pathconf,
	.vop_fid =		zfs_freebsd_fid,
	.vop_getacl =		zfs_freebsd_getacl,
	.vop_setacl =		zfs_freebsd_setacl,
	.vop_aclcheck =		zfs_freebsd_aclcheck,
};

/*
 * special share hidden files vnode operations template
 */
struct vop_vector zfs_shareops = {
	.vop_default =		&default_vnodeops,
	.vop_access =		zfs_freebsd_access,
	.vop_inactive =		zfs_freebsd_inactive,
	.vop_reclaim =		zfs_freebsd_reclaim,
	.vop_fid =		zfs_freebsd_fid,
	.vop_pathconf =		zfs_freebsd_pathconf,
};